Example #1
        public void SwitchQuadrants(UMat img)
        {
            ///Accepts 2-channel images.
            //Select an even-sized ROI: if the image has an odd number of rows or columns,
            //the rightmost column and bottom row are left out of the swap and keep their original values.
            //Does nothing to 1-D matrices, since the ROI then has zero width or height.
            using (img = new UMat(img, new Rectangle(0, 0, img.Cols & -2, img.Rows & -2))) {
                // rearrange the quadrants of Fourier image  so that the origin is at the image center
                int cx = img.Cols / 2;                //centre x and y
                int cy = img.Rows / 2;

                using (UMat q0 = new UMat(img, new Rectangle(0, 0, cx, cy)),                  // Top-Left - Create a ROI per quadrant
                       q1 = new UMat(img, new Rectangle(cx, 0, cx, cy)),                      // Top-Right
                       q2 = new UMat(img, new Rectangle(0, cy, cx, cy)),                      // Bottom-Left
                       q3 = new UMat(img, new Rectangle(cx, cy, cx, cy))) {                   // Bottom-Right
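                    //quarterImg32f2c: presumably a preallocated quarter-size, 32-bit float, 2-channel
                    //scratch buffer held as a field and reused between calls (not shown in this example)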
                    UMat tmp = quarterImg32f2c;

                    q0.CopyTo(tmp);                    // swap quadrants (Top-Left with Bottom-Right)
                    q3.CopyTo(q0);
                    tmp.CopyTo(q3);
                    q1.CopyTo(tmp);                    // swap quadrant (Top-Right with Bottom-Left)
                    q2.CopyTo(q1);
                    tmp.CopyTo(q2);
                }
            }
        }
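A minimal usage sketch (not from the original source; complexImg is a placeholder name, and the flag comes from Emgu's CvEnum.DxtType): SwitchQuadrants is typically called right after a forward DFT of a 2-channel (real/imaginary) image so that the 0 Hz term ends up at the image centre.

        //hedged sketch, assuming complexImg is a 2-channel 32-bit float UMat
        UMat spectrum = new UMat();
        CvInvoke.Dft(complexImg, spectrum, DxtType.Forward, 0);  //forward DFT, output stays 2-channel
        SwitchQuadrants(spectrum);                               //0 Hz component moves to (Cols/2, Rows/2)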
Example #2
        public async Task Can_resize_bilinear_gray()
        {
            var platformId = GetPlatforms().First();
            var deviceId   = GetPlatformDevices(platformId).First();
            var contextId  = CreateContext(platformId, new[] { deviceId });
            var queueId    = CreateCommandQueue(contextId, deviceId);

            var src    = new UMat();
            var sample = Samples.sample00;
            var w      = sample.Width;
            var h      = sample.Height;

            using var bitmap = sample.ToMat();
            bitmap.CopyTo(src);

            using var gray = new UMat();
            CvInvoke.CvtColor(src, gray, ColorConversion.Bgra2Gray);

            var grayArray = new byte[w * h];

            gray.CopyTo(grayArray);

            var grayBufferId   = CopyBuffer(contextId, grayArray.AsSpan());
            var resultBufferId = OclMemoryPool.Shared.RentBuffer <byte>(contextId, w * 2 * h * 2);

            var eventId = await _resizeAlgorithms.BilinearGray(queueId, w, h, w * 2, h * 2, new OclBuffer <byte>(grayBufferId), new OclBuffer <byte>(resultBufferId));

            var resultArray = new byte[w * 2 * h * 2];

            EnqueueReadBuffer(queueId, resultBufferId, false, 0, w * 2 * h * 2, resultArray.AsSpan(), stackalloc nint[] { eventId });
Example #3
        public TekBorderAnalyzer(UMat matGray, OCVGridDefinition gridDef)
        {
            Matrix <Byte> matrix = new Matrix <Byte>(matGray.Rows, matGray.Cols, matGray.NumberOfChannels);

            matGray.CopyTo(matrix);
            LeftBorderValues = FindRowValues(matrix, gridDef, (int)(gridDef.ColSize * 0.1));
            TopBorderValues  = FindColValues(matrix, gridDef, (int)(gridDef.RowSize * 0.1));
            LeftAreaBorders  = AnalyzeBorderValues(LeftBorderValues, ref HorizontalThreshold);
            TopAreaBorders   = AnalyzeBorderValues(TopBorderValues, ref VerticalTreshold);
        }
Example #4
 public void Bgr2Gray(UMat i, UMat o)
 {
     //if not grayscale, convert bgr image to grayscale
     if (i.NumberOfChannels != 1)
     {
         CvInvoke.CvtColor(i, o, ColorConversion.Bgr2Gray);
     }
     else
     {
         i.CopyTo(o);
     }
 }
Example #5
        public void SwitchQuadrants(UMat img)
        {
            // crop the spectrum, if it has an odd number of rows or columns
            img = new UMat(img, new Rectangle(0, 0, img.Cols & -2, img.Rows & -2));

            // rearrange the quadrants of Fourier image  so that the origin is at the image center
            int cx = img.Cols / 2;
            int cy = img.Rows / 2;

            UMat q0  = new UMat(img, new Rectangle(0, 0, cx, cy));       // Top-Left - Create a ROI per quadrant
            UMat q1  = new UMat(img, new Rectangle(cx, 0, cx, cy));      // Top-Right
            UMat q2  = new UMat(img, new Rectangle(0, cy, cx, cy));      // Bottom-Left
            UMat q3  = new UMat(img, new Rectangle(cx, cy, cx, cy));     // Bottom-Right
            UMat tmp = new UMat();                                       // swap quadrants (Top-Left with Bottom-Right)
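            //Note: unlike Example #1, these quadrant ROI headers (and tmp) are not wrapped in
            //using blocks here; disposal is presumably left to the finalizers.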

            q0.CopyTo(tmp);
            q3.CopyTo(q0);
            tmp.CopyTo(q3);
            q1.CopyTo(tmp);                                // swap quadrant (Top-Right with Bottom-Left)
            q2.CopyTo(q1);
            tmp.CopyTo(q2);
        }
Example #6
        private void SaveRaw(object sender, EventArgs e)
        {
            ToolStripButton toolStripButton = (ToolStripButton)sender;
            Form            form            = toolStripButton.GetCurrentParent().FindForm();
            // Call the save file dialog to enter the file name of the image
            SaveFileDialog saveFileDialog1 = new SaveFileDialog();

            saveFileDialog1.Filter           = "XML files(*.xml) | *.xml";
            saveFileDialog1.FilterIndex      = 1;
            saveFileDialog1.RestoreDirectory = true;

            if (saveFileDialog1.ShowDialog() == DialogResult.OK)
            {
                using (Mat mat = new Mat()) {
                    switch (form.Text)
                    {
                    case "CamView2":                             //rawData.Save(saveFileDialog1.FileName);
                        rawData.CopyTo(mat);
                        break;

                    case "PhaseView":                             //phFo.Save(saveFileDialog1.FileName);
                        phFo.CopyTo(mat);
                        break;

                    case "FTView1":                             //magT.Save(saveFileDialog1.FileName);
                        magT.CopyTo(mat);
                        break;

                    case "IntensityView":                             //magFo.Save(saveFileDialog1.FileName);
                        magFo.CopyTo(mat);
                        break;

                    default:
                        MessageBox.Show("An error has occurred");
                        break;
                    }
                    //saving to XML is only supported through FileStorage
                    using (FileStorage fs = new FileStorage(saveFileDialog1.FileName, FileStorage.Mode.Write)) {
                        fs.Write(mat, "top");
                    }
                }
            }
        }
Example #7
        public virtual void dispImg(UMat inImg)
        {
            UMat toUpdate = (imageBox1.Image == disp_1) ? disp_2 : disp_1;

            if (!showColorMap)
            {
                inImg.CopyTo(toUpdate);
            }
            else
            {
                CvInvoke.ApplyColorMap(inImg, toUpdate, ColorMapType.Jet);
            }
            if (run1disp_1 == 0 || run1disp_2 == 0)
            {
                run1disp_1 = disp_1.Bytes[0];
                run1disp_2 = disp_2.Bytes[0];
            }
            imageBox1.Image = toUpdate;
        }
Example #8
 public void TestAccumulateWeighted()
 {
    int startValue = 50;
    Image<Gray, Single> img1 = new Image<Gray, float>(100, 40, new Gray(100));
    Image<Gray, Single> acc = new Image<Gray, float>(100, 40, new Gray(startValue));
    //IImage img = img2;
    //img1.AccumulateWeighted(acc, 0.5);
    CvInvoke.AccumulateWeighted(img1, acc, 0.3, null);
    TestOpenCL(delegate
             {
                UMat src = img1.ToUMat();
                UMat result = new UMat(img1.Rows, img1.Cols, CvEnum.DepthType.Cv32F, 1);
                
                result.SetTo(new MCvScalar(startValue), null);
                //IImage img = img2;
                //img1.AccumulateWeighted(result, 0.5);
                CvInvoke.AccumulateWeighted(src, result, 0.3, null);
                Image<Gray, Single> tmp = new Image<Gray, float>(img1.Size);
                result.CopyTo(tmp, null);
                CvInvoke.AbsDiff(acc, result, result);
                int nonZeroCount = CvInvoke.CountNonZero(result);
                EmguAssert.IsTrue(nonZeroCount == 0);
             });
 }
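For reference (not part of the original test): AccumulateWeighted computes acc = (1 - alpha) * acc + alpha * img per pixel, so with the inputs above every accumulator pixel becomes 65; the assertion only checks that the Mat and UMat paths agree, not this value.

    //illustrative expected value for the accumulation above (not asserted by the test)
    double expectedAcc = (1 - 0.3) * startValue + 0.3 * 100;   // = 65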
Example #9
        public void ProcessAddPhDiffNInverseT(
            double rDist,
            double sensorPxWidth,
            double sensorPxHeight,
            double wavelength,
            UMat magFoT,
            UMat phFoT,
            Rectangle ctrRoi,
            Point ctrRoiMaxMagLoc,
            UMat magFo,
            UMat phFo,
            ref UMat unitPhDiffDM
            )
        {
            ///Based on a bunch of parameters, calculates the phase difference at each point
            ///between the aperture(hologram) plane and the image plane. It adds this phase
            ///difference to each plane wave(point on the fourier spectrum), turning the fourier
            ///spectrum to one at the image plane, and inverse DFTs to get the predicted magnitude
            ///and phase of light at the image plane, as magFo and phFo.

            //if unassigned, generate unit phase difference matrix
            if (unitPhDiffDM == null)
            {
                unitPhDiffDM = GetUnitPhDiffDoubleM(phFoT.Size, phFoT.Size, sensorPxWidth, sensorPxHeight, wavelength);
            }
            else
            {
                //if new phase transform image is wider or higher than current
                //(giant) unitPhDiff matrix, dispose current giant matrix and
                //generate a bigger one
                if (phFoT.Rows > unitPhDiffDM.Rows || phFoT.Cols > unitPhDiffDM.Cols)
                {
                    int biggerRows = Math.Max(phFoT.Rows, unitPhDiffDM.Rows);
                    int biggerCols = Math.Max(phFoT.Cols, unitPhDiffDM.Cols);
                    unitPhDiffDM.Dispose();
                    unitPhDiffDM = GetUnitPhDiffDoubleM(new Size(biggerCols, biggerRows), phFoT.Size,
                                                        sensorPxWidth, sensorPxHeight, wavelength);
                }
            }

            //Calculate phase difference matrix to add to sinewaves at particular (x,y)s.
            //phDiffM = rDist * unitPhDiffM, but first select the right region on unitPhDiffM.
            Point     unitPhDiff0HzLoc = new Point(unitPhDiffDM.Cols / 2, unitPhDiffDM.Rows / 2);
            Rectangle unitPhDiffCtrRoi = new Rectangle(unitPhDiff0HzLoc - (Size)ctrRoiMaxMagLoc, ctrRoi.Size);

            //get ROI on the (giant) unitPhDiffM and multiply rDist by only that part.
            //the output goes into this.phDiffFM.
            UMat phDiffFM = this.phDiffFM;

            using (UMat unitPhDiffDMCtr = new UMat(unitPhDiffDM, unitPhDiffCtrRoi)) {
                unitPhDiffDMCtr.ConvertTo(phDiffFM, DepthType.Cv32F, alpha: rDist);                //convert phDiffDM to float to make same type as
                //phFoT, we add them together later
            }

            //if video mode is off, add phase difference to a copy of the first-order-only image,
            //because we don't want to modify the input and feed the output back to the input as
            //this function is called again when one of the parameters changes
            UMat phFoT2Mod;             //phFoT to modify

            if (videoMode)
            {
                phFoT2Mod = phFoT;
            }
            else
            {
                phFoT2Mod = magFo;                //we're gonna update it later anyway
                phFoT.CopyTo(phFoT2Mod);
            }
            using (UMat phFoT2ModCtr = new UMat(phFoT2Mod, ctrRoi)) {
                CvInvoke.Add(phFoT2ModCtr, phDiffFM, phFoT2ModCtr, dtype: DepthType.Cv32F);
            }

            //convert magnitude and phase Transform parts at the image plane
            //to real and imaginary parts and Inverse (fourier) Transform to
            //get magnitude and phase at the image plane (which is what we want!)
            UMat reFoT = magFo, imFoT = phFo;                      //we're gonna update it later anyway

            CvInvoke.PolarToCart(magFoT, phFoT2Mod, reFoT, imFoT); //reusing memory here.
            InverseT(reFoT, imFoT, magFo, phFo);                   //reusing memory here as well.
        }
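InverseT itself does not appear in these examples. Below is a rough, hypothetical sketch of what such an inverse-transform step could look like with Emgu CV (Merge, inverse Dft, Split, CartToPolar); the method name, parameter names, and flag choices are assumptions, and the project's actual helper may differ (for instance in how it scales the inverse DFT).

        //hypothetical sketch only; not the project's actual InverseT (scaling flags omitted for brevity)
        private void InverseTSketch(UMat reT, UMat imT, UMat mag, UMat ph)
        {
            using (VectorOfUMat planes = new VectorOfUMat(reT, imT))
            using (UMat complexT = new UMat())
            using (VectorOfUMat outPlanes = new VectorOfUMat())
            {
                CvInvoke.Merge(planes, complexT);                           //2-channel complex spectrum
                CvInvoke.Dft(complexT, complexT, DxtType.Inverse, 0);       //inverse DFT back to the image plane
                CvInvoke.Split(complexT, outPlanes);                        //separate real and imaginary parts
                CvInvoke.CartToPolar(outPlanes[0], outPlanes[1], mag, ph);  //magnitude and phase at the image plane
            }
        }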
Example #10
        public void ProcessCopyOverNCenter(UMat magT, UMat phT, Rectangle selRoi,
                                           out double selRoiMinMagVal, out double selRoiMaxMagVal,
                                           out Point selRoiMinMagLoc, out Point selRoiMaxMagLoc,
                                           out Rectangle ctrRoi, out Point ctrRoiMaxMagLoc,
                                           out Rectangle ltdSelRoi,
                                           UMat magFoT, UMat phFoT)
        {
            ///Takes the selected roi, copies and pastes it
            ///onto a zero image with its maximum value at the bottom-right
            ///of the centre, updates magFoT and phFoT.

            //create UMats filled with '0's to "paste" the first-order's Transform into
            magFoT.Create(magT.Rows, magT.Cols, DepthType.Cv32F, 1);
            magFoT.SetTo(new MCvScalar(0));
            magFoT.CopyTo(phFoT);
            if (_ManualCentering)
            {
                selRoiMinMagVal = 0;
                selRoiMaxMagVal = 0;
                selRoiMinMagLoc = new Point(0, 0);
                selRoiMaxMagLoc = fTView.GetManualCent() - (Size)selRoi.Location;
            }
            else
            {
                using (UMat magSelT = new UMat(magT, selRoi)) {
                    // get the values and locations of maximum and minimum points
                    // for each channel, but we only have 1 channel
                    // so we only use the [0] index.
                    magSelT.MinMax(out double[] selRoiMinMagValues, out double[] selRoiMaxMagValues,
                                   out Point[] selRoiMinMagLocations, out Point[] selRoiMaxMagLocations);
                    selRoiMinMagVal = selRoiMinMagValues[0];
                    selRoiMaxMagVal = selRoiMaxMagValues[0];
                    selRoiMinMagLoc = selRoiMinMagLocations[0];
                    selRoiMaxMagLoc = selRoiMaxMagLocations[0];
                    if (_WeightCentering)
                    {
                        using (UMat Mask = magSelT.Clone()) {
                            UMat aboveThreshold = new UMat();
                            Mask.SetTo(new MCvScalar((long)(selRoiMaxMagVal * 0.3)));
                            CvInvoke.Compare(magSelT, Mask, aboveThreshold, CmpType.GreaterEqual);
                            Moments m = CvInvoke.Moments(aboveThreshold, true);
                            selRoiMaxMagLoc = new Point((int)(m.M10 / m.M00), (int)(m.M01 / m.M00));
                        }
                    }
                }
            }
            //find 0Hz point in image, which is at the bottom-right of the centre,
            //because the width and height are even numbers as I cropped them
            //earlier in SwitchQuadrants(). There's no +1 because coordinates
            //start at index 0.
            Point foT0HzLoc = new Point(magFoT.Cols / 2, magFoT.Rows / 2);

            //calculate ctrRoi on foT to paste into, where the copied's max value
            //is at foT's 0Hz point.
            ctrRoi = new Rectangle(foT0HzLoc - (Size)selRoiMaxMagLoc, selRoi.Size);
            //it's possible for ctrRoi to go out of the image if you select
            //a region more than (width or height)/2, and the max value point
            //is at a corner, so we limit it. However, this means that the sizes
            //of selRoi and ctrRoi are different, so we limit selRoi too.
            LimitRoiToImage2(magFoT, ref ctrRoi);
            //calculate ltdSelRoi by going backwards and "pasting" ctrRoi
            //on magT.
            //find the new maxMagLoc in ctrRoi (it might change after limiting)
            ctrRoiMaxMagLoc = foT0HzLoc - (Size)ctrRoi.Location;
            Point selRoiMaxMagAbsLoc = selRoi.Location + (Size)selRoiMaxMagLoc;            //location relative to origin of magT

            ltdSelRoi = new Rectangle(selRoiMaxMagAbsLoc - (Size)ctrRoiMaxMagLoc,
                                      ctrRoi.Size);
            //finally, copy ltdSelRoi in T to ctrRoi in foT
            using (UMat magLtdSelT = new UMat(magT, ltdSelRoi),
                   phLtdSelT = new UMat(phT, ltdSelRoi),
                   magCtrFoT = new UMat(magFoT, ctrRoi),
                   phCtrFoT = new UMat(phFoT, ctrRoi)) {
                magLtdSelT.CopyTo(magCtrFoT);
                phLtdSelT.CopyTo(phCtrFoT);
            }
        }
Example #11
        private void testingpixels(OCVGridDefinition gridDef)
        {
            int threshold = 250;

            Matrix <Byte> matrix = new Matrix <Byte>(uimage.Rows, uimage.Cols, uimage.NumberOfChannels);

            uimage.CopyTo(matrix);
            using (StreamWriter sw = new StreamWriter("pixels.dmp"))
            {
                sw.WriteLine("------------------ROWS-------------------");
                for (int r = 0; r < gridDef.Rows; r++)
                {
                    int rowLoc = (int)(gridDef.RowLocation(r) + gridDef.RowSize * 0.5);
                    sw.WriteLine("*** row: {0}    (loc: {1})", r, rowLoc);
                    for (int c = 1; c < gridDef.Cols; c++)
                    {
                        int loc = gridDef.ColLocation(c);
                        sw.WriteLine("col {0}  location {1}", c, loc);
                        int nBelow = 0;
                        for (int col = loc - 10; col < loc + 10; col++)
                        {
                            if (col < 0 || col >= matrix.Cols)
                            {
                                continue;
                            }
                            byte value = matrix.Data[rowLoc, col];
                            if (value < threshold)
                            {
                                nBelow++;
                            }
                            //sw.WriteLine("row: {0}  col: {1}  : {2}", row, col, value);
                        }
                        sw.WriteLine("col {0}  pct: {1} %", c, (nBelow / 20.0) * 100);
                    }
                    sw.WriteLine("***");
                }
                sw.WriteLine("------------------COLUMNS-------------------");
                for (int c = 0; c < gridDef.Cols; c++)
                {
                    int colLoc = (int)(gridDef.ColLocation(c) + gridDef.ColSize * 0.5);
                    sw.WriteLine("*** col: {0}    (loc: {1})", c, colLoc);
                    for (int r = 1; r < gridDef.Rows; r++)
                    {
                        int loc = gridDef.RowLocation(r);
                        sw.WriteLine("row {0}  location {1}", r, loc);
                        int nBelow = 0;
                        for (int row = loc - 10; row < loc + 10; row++)
                        {
                            if (row < 0 || row >= matrix.Rows)
                            {
                                continue;
                            }
                            byte value = matrix.Data[row, colLoc];
                            if (value < threshold)
                            {
                                nBelow++;
                            }
                            //sw.WriteLine("row: {0}  col: {1}  : {2}", row, col, value);
                        }
                        sw.WriteLine("row {0}  pct: {1} %", r, (nBelow / 20.0) * 100);
                    }
                    sw.WriteLine("***");
                }
            }
        }
Example #12
        public void ProcessSelRectToEnd(Rectangle selRect, UMat magT, UMat phT)
        {
            ///Takes the selected rectangle portion, crops it and pastes it onto a zero image with its maximum value at the bottom-right of the centre,
            ///then calls the next function.
            ///Previous name was ProcessSelRectToPasteRect

            UMat magSelT = new UMat(magT, selRect);
            UMat phSelT  = new UMat(phT, selRect);

            //gets the values and locations of maximum and minimum points for each channel, but we only have 1 channel
            //so we only use the [0] index.
            magSelT.MinMax(out double[] minMagValues, out selRectMaxMagValues,
                           out Point[] minMagLocations, out selRectMaxMagLocations);


            //create empty UMats to "paste" the first-order spectrum into
            if (magFoT != null)
            {
                magFoT.Dispose();
            }
            magFoT = new UMat(magT.Size, DepthType.Cv32F, 1);             //magnitude of First-Order only frequency spectrum image
            magFoT.SetTo(new MCvScalar(0));
            if (phFoT != null)
            {
                phFoT.Dispose();
            }
            phFoT = magFoT.Clone();
            //find 0Hz point in image.
            //0Hz point is at the bottom-right of the centre, because the width and height are even numbers as I
            //cropped them earlier in SwitchQuadrants(). There's no +1 because coordinates start at index 0.
            Point foT0HzPoint = new Point(magFoT.Cols / 2, magFoT.Rows / 2);

            //find centered ROI on first-order-only, where the maximum value is placed at the 0Hz point on FoT
            this.ctrRect = new Rectangle(foT0HzPoint.Minus(selRectMaxMagLocations[0]), selRect.Size);

            LimitRectToWithinImage(magFoT, ref ctrRect);

            UMat magCtrFoT = new UMat(magFoT, ctrRect);
            UMat phCtrFoT  = new UMat(phFoT, ctrRect);

            magSelT.CopyTo(magCtrFoT);
            phSelT.CopyTo(phCtrFoT);
            //dispose the UMat headers. I don't think they will free the memory unless it's the last UMat accessing the memory.
            magSelT.Dispose();
            phSelT.Dispose();
            magCtrFoT.Dispose();
            phCtrFoT.Dispose();

            /*
             * //debugging: create a clone for displaying, process for displaying and display.
             * //I made it complicated because normalisation and log should be done only in the ctredROI,
             * //as any other area is just a 0.
             * //I believe that speeds up the processing for display.
             * UMat MagFoTCpy = magFoT.Clone();
             * UMat magCtrFoTCpy = new UMat(MagFoTCpy, ctrRect);
             * Norm4Disp(magCtrFoTCpy, log: true, norm: false);
             * CvInvoke.Normalize(magCtrFoTCpy, magCtrFoTCpy, 0, 255, NormType.MinMax);
             * MagFoTCpy.ConvertTo(MagFoTCpy, DepthType.Cv8U);
             * imageBox3.Image = MagFoTCpy;
             */

            //continue the chain
            ProcessAddPhaseToEnd(
                this.rDist,
                this.sensorPixelWidth,
                this.sensorPixelHeight,
                this.wavelength,
                this.magFoT,
                this.phFoT,
                this.ctrRect,
                this.selRectMaxMagLocations[0]);
        }
Example #13
      public void TestDenseHistogram3()
      {
         UMat img = new UMat(400, 400, DepthType.Cv8U, 3);
         CvInvoke.Randu(img, new MCvScalar(), new MCvScalar(255, 255, 255));
         UMat hist = new UMat();
         using (VectorOfUMat vms = new VectorOfUMat(img))
         {
            CvInvoke.CalcHist(vms, new int[] { 0, 1, 2 }, null, hist, new int[] { 20, 20, 20 },
               new float[] { 0, 255, 0, 255, 0, 255 }, true);
            byte[] bytes = hist.Bytes;
            hist.SetTo(bytes);

            float[] bins = new float[20 * 20 * 20];
            hist.CopyTo(bins);
         }
      }
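An illustrative follow-up (not part of the test): CopyTo flattens the 20x20x20 dense histogram in row-major order, so assuming bin indices i, j, k for channels 0, 1 and 2, the corresponding count can be read as:

         //hypothetical indexing into the flattened 20x20x20 histogram (i, j, k are assumed bin indices)
         float count = bins[(i * 20 + j) * 20 + k];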
Example #14
 public void TestSobelScharr()
 {
    Mat img = EmguAssert.LoadMat("lena.jpg");
    Mat result = new Mat();
    CvInvoke.Sobel(img, result, CvEnum.DepthType.Cv8U, 1, 0, -1, 1.0);
    TestOpenCL(delegate
             {
                UMat uresult = new UMat();
                using (UMat um = img.GetUMat(AccessType.ReadWrite))
                {
                   Stopwatch watch = Stopwatch.StartNew();
                   CvInvoke.Sobel(img, uresult, CvEnum.DepthType.Cv8U, 1, 0, -1, 1.0, 0.0, CvEnum.BorderType.Default);
                   watch.Stop();
                   Trace.WriteLine(String.Format("Sobel completed in {0} milliseconds. (OpenCL: {1})", watch.ElapsedMilliseconds, CvInvoke.UseOpenCL));
                   uresult.CopyTo(result, null);
                }
                //Emgu.CV.UI.ImageViewer.Show(result);
             });
 }
Example #15
        private void processVideo(object sender, DoWorkEventArgs e)
        {
            CvInvoke.UseOpenCL    = true;
            CvInvoke.UseOptimized = true;

            //determine amount of maps already present in the world
            int     mapCount = 0;
            NbtFile map      = null;

            try {
                map      = new NbtFile(Path.Join(WorldFolderPath, "\\data\\idcounts.dat"));
                mapCount = Int32.Parse(map.RootTag.Get <NbtCompound>("data").Get <NbtInt>("map").Value.ToString()) + 1;
            } catch { }
            Debug.Write("MapCount:" + mapCount + "\n");

            //start videocapture
            VideoCapture video = new VideoCapture(VideoFilePath);

            //get framerate
            framerate = video.GetCaptureProperty(CapProp.Fps);
            //get framecount
            frames = video.GetCaptureProperty(CapProp.FrameCount);

            //calculate the number of frames to keep at the target framerate
            reducedFrames = Math.Floor((frames / framerate) * TargetFrameRate);

            if (map != null)
            {
                map.RootTag.Get <NbtCompound>("data").Get <NbtInt>("map").Value = mapCount + ((int)reducedFrames) * 15;
                map.SaveToFile(Path.Join(WorldFolderPath, "\\data\\idcounts.dat"), NbtCompression.None);
            }

            //create Preset for map data
            NbtCompound preset = new NbtCompound("")
            {
                new NbtCompound("data")
                {
                    new NbtString("dimension", "minecraft:overworld"),
                    new NbtLong("xCenter", 128),
                    new NbtLong("zCenter", 128),
                    new NbtByte("scale", 3),
                    new NbtByte("locked", 1),
                    new NbtByte("unlimitedTracking", 0),
                    new NbtByte("trackingPosition", 0),
                    new NbtByteArray("colors")
                },
                new NbtInt("DataVersion", 2584)
            };

            //create path to output folder
            string mapOutputFolder = Path.Join(WorldFolderPath, "/data");

            UMat ones = new UMat(1, 3, DepthType.Cv8U, 1, UMat.Usage.AllocateDeviceMemory);
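            //row of three 1s: used as the kernel for CvInvoke.Transform below, summing the three
            //squared channel differences of each pixel into a single channel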

            ones.SetTo(new MCvScalar(1));


            UMat calculation   = new UMat(new Size(640, 384), DepthType.Cv32S, 3, UMat.Usage.AllocateDeviceMemory);
            UMat singleChannel = new UMat(new Size(640, 384), DepthType.Cv32S, 1, UMat.Usage.AllocateDeviceMemory);

            //keeps lowest value
            UMat lowestDiversion = new UMat(new Size(640, 384), DepthType.Cv32S, 1, UMat.Usage.AllocateDeviceMemory);
            //8-bit mask set where the current palette colour is closer than the best match so far
            UMat lessDiversion = new UMat(new Size(640, 384), DepthType.Cv8U, 1, UMat.Usage.AllocateDeviceMemory);
            //store block value
            UMat blocks = new UMat(new Size(640, 384), DepthType.Cv8U, 1, UMat.Usage.AllocateDeviceMemory);


            while (frame < reducedFrames)
            {
                //calculate position in video and set to next frame
                position = frame / reducedFrames;
                video.SetCaptureProperty(CapProp.PosFrames, Math.Round(position * frames));

                var watch = System.Diagnostics.Stopwatch.StartNew();

                //get video frame
                if (!video.Read(singleFrame))
                {
                    break;
                }

                //resize to minecraft compatible resolution
                CvInvoke.Resize(singleFrame, singleFrame, new Size(640, 384));
                singleFrame.ConvertTo(singleFrame, DepthType.Cv32F);

                //display current Frame to user
                if (PreviewPicture.Image != null)
                {
                    PreviewPicture.Image.Dispose();
                }
                PreviewPicture.Image = singleFrame.ToBitmap();

                lowestDiversion.SetTo(new MCvScalar(255));
                lessDiversion.SetTo(new MCvScalar(0));
                blocks.SetTo(Colors.minecraftColors[Colors.minecraftColors.Length - 1]);

                for (int i = 0; i < Colors.minecraftColors.Length; i++)
                {
                    calculation = singleFrame - Colors.minecraftColors[i];
                    CvInvoke.Multiply(calculation, calculation, calculation);
                    CvInvoke.Transform(calculation, singleChannel, ones);

                    CvInvoke.Sqrt(singleChannel, singleChannel);
                    singleChannel.ConvertTo(singleChannel, DepthType.Cv32S);

                    CvInvoke.Compare(singleChannel, lowestDiversion, lessDiversion, CmpType.LessThan);
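                    //lessDiversion now marks pixels where this colour is closer than the best so far;
                    //the masked CopyTo below keeps the running minimum distance per pixel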

                    singleChannel.CopyTo(lowestDiversion, lessDiversion);

                    blocks.SetTo(new MCvScalar(i + 4), lessDiversion);
                }

                for (int y = 0; y < 3; y++)
                {
                    for (int x = 0; x < 5; x++)
                    {
                        UMat output = new UMat(blocks, new Rectangle(128 * x, 128 * y, 128, 128));
                        preset.Get <NbtCompound>("data").Get <NbtByteArray>("colors").Value = output.Bytes;

                        NbtFile file = new NbtFile(preset);
                        file.SaveToFile(Path.Join(mapOutputFolder, String.Concat("map_", mapCount + (frame * 15) + (y * 5 + x), ".dat")), NbtCompression.None);
                    }
                }

                watch.Stop();
                elapsedTime = watch.ElapsedMilliseconds;
                Debug.Write("Took:" + elapsedTime + "\n");

                System.GC.Collect();

                //send progress update to ui and console
                worker.ReportProgress((int)Math.Round(position * 100), elapsedTime * (reducedFrames - frame));
                Debug.Write(frame + "/" + reducedFrames + "-" + position + "\n");

                //increase framecount
                frame++;
            }

            worker.ReportProgress(100, 0.0);

            //pass the values to display to the user
            e.Result = new convertResult((int)frame - 1, mapCount);
        }