public static XElement ToXml(ConvolutionFilter filter)
        {
            var res = new XElement(TAG_NAME,
                new XAttribute("divisor", CommonFormatter.Format(filter.Divisor)),
                new XAttribute("bias", CommonFormatter.Format(filter.Bias))
            );

            var xMatrix = new XElement("matrix");
            for (var y = 0; y < filter.MatrixY; y++) {
                var xRow = new XElement("r");
                for (var x = 0; x < filter.MatrixX; x++) {
                    var xCol = new XElement("c") { Value = CommonFormatter.Format(filter.Matrix[y, x]) };
                    xRow.Add(xCol);
                }
                xMatrix.Add(xRow);
            }
            res.Add(xMatrix);

            res.Add(new XElement("color", XColorRGBA.ToXml(filter.DefaultColor)));
            if (filter.Reserved != 0) {
                res.Add(new XAttribute("reserved", filter.Reserved));
            }
            res.Add(new XAttribute("clamp", CommonFormatter.Format(filter.Clamp)));
            res.Add(new XAttribute("preserveAlpha", CommonFormatter.Format(filter.PreserveAlpha)));
            return res;
        }
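For context, a minimal usage sketch for this serializer. The XConvolutionFilter host class name and the object-initializer properties are assumptions inferred from the FromXml example further down this page; the root element name comes from the unshown TAG_NAME constant, and DefaultColor must be set to something XColorRGBA.ToXml accepts.

        // Sketch (assumed host class XConvolutionFilter; settable properties inferred from FromXml below)
        var filter = new ConvolutionFilter {
            Divisor = 9,
            Bias    = 0,
            Matrix  = new double[3, 3] {
                { 1, 1, 1 },
                { 1, 1, 1 },
                { 1, 1, 1 }
            },
            Clamp         = true,
            PreserveAlpha = true
        };
        XElement xml = XConvolutionFilter.ToXml(filter); // root element name comes from TAG_NAME
        xml.Save("convolution-filter.xml");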
Example #2
    // Use this for initialization
    void Start()
    {
        srcBmp = new BitmapData();
        srcBmp.SetTexture2D(texture);
        srcBmp.Unlock();

        distBmp = new BitmapData(srcBmp.width, srcBmp.height, Color.black);

//		Debug.Log(FlashInt.BitmapData.UintToColor(0xffff0000));
//		Debug.Log((Color32)Color.red);
//
//		Debug.Log(FlashInt.BitmapData.ColorToUint(Color.red));
//		Debug.Log(0xffff0000);

        // edge

        int[] matrix  = new int[] { -1, -1, -1, -1, 8, -1, -1, -1, -1 };        // filter kernel
        int   divisor = 1;
        int   bias    = 0;

        /*
         * // 'blur':
         * int[] matrix = new int[]{ 1, 1, 1,
         *                           1, 1, 1,
         *                           1, 1, 1 };
         * int divisor = 9;
         * int bias = 0;
         */

        /*
         * // 'sharpness':
         * int[] matrix = new int[]{ -1, -1, -1,
         *                           -1,  9, -1,
         *                           -1, -1, -1 };
         * int divisor = 1;
         * int bias = 0;
         */

        /*
         * // 'emboss':
         * int[] matrix = new int[]{ -2, -1, 0,
         *                           -1,  1, 1,
         *                            0,  1, 2 };
         * int divisor = 1;
         * int bias = 0;
         */

        convolutionFilter = new ConvolutionFilter(matrix, divisor, bias);

        grayScaleFilter = new MatrixFilter(new float[] {
            0.298912f, 0.586611f, 0.114478f, 0, 0,
            0.298912f, 0.586611f, 0.114478f, 0, 0,
            0.298912f, 0.586611f, 0.114478f, 0, 0,
            0, 0, 0, 1, 0
        });
        filter = new ColorMatrixFilter(new float[] {
            -1, 0, 0, 0, 1,
            0, -1, 0, 0, 1,
            0, 0, -1, 0, 1,
            0, 0, 0, 1, 0
        });
    }
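A plausible next step, not shown in this Start() snippet, is to run the edge kernel over the captured bitmap, e.g. from Update(). The call below is a sketch based on the BitmapData.ApplyFilter overloads that appear in Examples #10 and #23 further down this page; passing null for the pt argument follows the usage there.

    // Sketch: convolve srcBmp with the 3x3 edge kernel into distBmp
    distBmp.ApplyFilter(srcBmp, new Rectangle(0, 0, srcBmp.width, srcBmp.height), null, convolutionFilter);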
        /// <summary>
        /// Converts the distortion operator from the frequency domain to the spatial domain. The dimensions do not change.
        /// </summary>
        /// <param name="otf">the distortion operator in the frequency domain (OTF - Optical Transfer Function)</param>
        /// <returns>PSF - Point Spread Function</returns>
        public static ConvolutionFilter Otf2psf(Complex[,] otf)
        {
            Complex[,] psf = Fourier.ITransform(otf);
            int FilterSize = psf.GetLength(0);
            int halfSize = (FilterSize - 1) / 2;
            int ost = FilterSize - halfSize;
            Complex[,] returnPSF = new Complex[FilterSize, FilterSize];
            //+ - -
            //- - -
            //- - -
            for (int i = 0; i < halfSize; i++)
                for (int j = 0; j < halfSize; j++)
                    returnPSF[i, j] = psf[i + ost, j + ost];
            //- + +
            //- - -
            //- - -
            for (int i = 0; i < halfSize; i++)
                for (int j = halfSize; j < FilterSize; j++)
                    returnPSF[i, j] = psf[i + ost, j - halfSize];
            //- - -
            //+ - -
            //+ - -
            for (int i = halfSize; i < FilterSize; i++)
                for (int j = 0; j < halfSize; j++)
                    returnPSF[i, j] = psf[i - halfSize, j + ost];
            //- - -
            //- + +
            //- + +
            for (int i = halfSize; i < FilterSize; i++)
                for (int j = halfSize; j < FilterSize; j++)
                    returnPSF[i, j] = psf[i - halfSize, j - halfSize];

            ConvolutionFilter cf = new ConvolutionFilter("Recovery Fiter", Converter.ToDoubleMatrix(returnPSF));
            return cf;
        }
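The four quadrant loops above amount to a single circular shift: for any FilterSize they satisfy returnPSF[i, j] = psf[(i + ost) % FilterSize, (j + ost) % FilterSize], which moves the kernel's center from the corner (FFT convention) back to the middle of the matrix, mirroring MATLAB's otf2psf. A self-check sketch that could sit just before the return statement above (System.Diagnostics.Debug assumed):

            // Sketch: verify the quadrant copies against the equivalent circular shift
            for (int i = 0; i < FilterSize; i++)
                for (int j = 0; j < FilterSize; j++)
                    Debug.Assert(returnPSF[i, j] == psf[(i + ost) % FilterSize, (j + ost) % FilterSize]);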
Example #4
        public static XElement ToXml(ConvolutionFilter filter)
        {
            var res = new XElement(TAG_NAME,
                                   new XAttribute("divisor", CommonFormatter.Format(filter.Divisor)),
                                   new XAttribute("bias", CommonFormatter.Format(filter.Bias))
                                   );

            var xMatrix = new XElement("matrix");

            for (var y = 0; y < filter.MatrixY; y++)
            {
                var xRow = new XElement("r");
                for (var x = 0; x < filter.MatrixX; x++)
                {
                    var xCol = new XElement("c")
                    {
                        Value = CommonFormatter.Format(filter.Matrix[y, x])
                    };
                    xRow.Add(xCol);
                }
                xMatrix.Add(xRow);
            }
            res.Add(xMatrix);

            res.Add(new XElement("color", XColorRGBA.ToXml(filter.DefaultColor)));
            if (filter.Reserved != 0)
            {
                res.Add(new XAttribute("reserved", filter.Reserved));
            }
            res.Add(new XAttribute("clamp", CommonFormatter.Format(filter.Clamp)));
            res.Add(new XAttribute("preserveAlpha", CommonFormatter.Format(filter.PreserveAlpha)));
            return res;
        }
        //apply filter

        private void applyFilter(Filter filter)
        {
            BitmapSource source = (BitmapSource)this.originalImage.Source;

            if (filter is ConvolutionFilter)
            {
                Bitmap            originalBitmap    = source.ConvertToBitmap();
                ConvolutionFilter convolutionFilter = (ConvolutionFilter)filter;

                bool parsed = false;
                if (this.offsetTextField.Text.Replace(" ", "").Length != 0)
                {
                    float offset = 0;
                    parsed = (float.TryParse(this.offsetTextField.Text.Replace(" ", ""), out offset));
                    if (parsed == false)
                    {
                        MessageBox.Show("Enter valid offset / offset value", "Invalid parameters", MessageBoxButton.OK, MessageBoxImage.Information);
                        return;
                    }
                    convolutionFilter.Offset = offset;
                }

                if (this.factorTextField.Text.Replace(" ", "").Length != 0)
                {
                    float factor = 0;
                    parsed = parsed && (float.TryParse(this.factorTextField.Text.Replace(" ", ""), out factor));
                    if (parsed == false)
                    {
                        MessageBox.Show("Enter valid offset / offset value", "Invalid parameters", MessageBoxButton.OK, MessageBoxImage.Information);
                        return;
                    }
                    convolutionFilter.Factor = factor;
                }

                Bitmap      filteredBitmap = originalBitmap.ConvolutionFilter(convolutionFilter);
                BitmapImage bitmapImage    = filteredBitmap.BitmapToImageSource();
                this.filteredImage.Source = bitmapImage;
            }
            else     //function filter
            {
                FunctionFilter functionFilter = (FunctionFilter)filter;
                if (functionFilter is IFunctionFilterOffset)
                {
                    IFunctionFilterOffset factorFunctionFilter = (IFunctionFilterOffset)functionFilter;
                    float coeff  = 0;
                    bool  parsed = (float.TryParse(this.coeffTextField.Text.Replace(" ", ""), out coeff));

                    if (parsed == false)
                    {
                        MessageBox.Show("Enter valid coefficient value", "Invalid parameters", MessageBoxButton.OK, MessageBoxImage.Information);
                        return;
                    }

                    factorFunctionFilter.Offset = coeff;
                }

                WriteableBitmap writeableBitmap = functionFilter.ApplyFunctionFilter((BitmapImage)this.originalImage.Source);
                this.filteredImage.Source = writeableBitmap;
            }
        }
Example #6
        public static ConvolutionFilter FromXml(XElement xFilter)
        {
            var xMatrix   = xFilter.RequiredElement("matrix");
            var xReserved = xFilter.Attribute("reserved");

            var filter = new ConvolutionFilter {
                Divisor = xFilter.RequiredDoubleAttribute("divisor"),
                Bias    = xFilter.RequiredDoubleAttribute("bias")
            };

            var xRows  = xMatrix.Elements().ToList();
            var height = xRows.Count;
            var width  = xMatrix.Elements().First().Elements().Count();

            filter.Matrix = new double[height, width];
            for (var y = 0; y < filter.MatrixY; y++)
            {
                var xRow  = xRows[y];
                var xCols = xRow.Elements().ToList();
                for (var x = 0; x < filter.MatrixX; x++)
                {
                    var xCol = xCols[x];
                    filter.Matrix[y, x] = CommonFormatter.ParseDouble(xCol.Value);
                }
            }

            filter.DefaultColor = XColorRGBA.FromXml(xFilter.RequiredElement("color").Element("Color"));
            if (xReserved != null)
            {
                filter.Reserved = byte.Parse(xReserved.Value);
            }
            filter.Clamp         = xFilter.RequiredBoolAttribute("clamp");
            filter.PreserveAlpha = xFilter.RequiredBoolAttribute("preserveAlpha");
            return filter;
        }
        public static ConvolutionFilter FromXml(XElement xFilter)
        {
            var xMatrix = xFilter.RequiredElement("matrix");
            var xReserved = xFilter.Attribute("reserved");

            var filter = new ConvolutionFilter {
                Divisor = xFilter.RequiredDoubleAttribute("divisor"),
                Bias = xFilter.RequiredDoubleAttribute("bias")
            };

            var xRows = xMatrix.Elements().ToList();
            var height = xRows.Count;
            var width = xMatrix.Elements().First().Elements().Count();

            filter.Matrix = new double[height, width];
            for (var y = 0; y < filter.MatrixY; y++) {
                var xRow = xRows[y];
                var xCols = xRow.Elements().ToList();
                for (var x = 0; x < filter.MatrixX; x++) {
                    var xCol = xCols[x];
                    filter.Matrix[y, x] = CommonFormatter.ParseDouble(xCol.Value);
                }
            }

            filter.DefaultColor = XColorRGBA.FromXml(xFilter.RequiredElement("color").Element("Color"));
            if (xReserved != null) {
                filter.Reserved = byte.Parse(xReserved.Value);
            }
            filter.Clamp = xFilter.RequiredBoolAttribute("clamp");
            filter.PreserveAlpha = xFilter.RequiredBoolAttribute("preserveAlpha");
            return filter;
        }
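Taken together with the ToXml serializer above, a round trip is short; XConvolutionFilter is again the assumed host class:

            // Sketch: XML round trip
            XElement xml = XConvolutionFilter.ToXml(originalFilter);
            ConvolutionFilter restored = XConvolutionFilter.FromXml(xml);
            // Divisor, Bias, Matrix, DefaultColor, Clamp and PreserveAlpha survive the trip;
            // the optional "reserved" attribute is written only when non-zero and read only when present.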
        /// <summary>
        /// Processes the image.
        /// </summary>
        /// <param name="factory">
        /// The current instance of the <see cref="T:ImageProcessor.ImageFactory"/> class containing
        /// the image to process.
        /// </param>
        /// <returns>
        /// The processed image from the current instance of the <see cref="T:ImageProcessor.ImageFactory"/> class.
        /// </returns>
        public Image ProcessImage(ImageFactory factory)
        {
            Bitmap newImage = null;
            Image  image    = factory.Image;
            Tuple <IEdgeFilter, bool> parameters = this.DynamicParameter;
            IEdgeFilter filter    = parameters.Item1;
            bool        greyscale = parameters.Item2;

            try
            {
                ConvolutionFilter convolutionFilter = new ConvolutionFilter(filter, greyscale);

                // Check and assign the correct method. Don't use reflection for speed.
                newImage = filter is I2DEdgeFilter
                    ? convolutionFilter.Process2DFilter((Bitmap)image)
                    : convolutionFilter.ProcessFilter((Bitmap)image);

                image.Dispose();
                image = newImage;
            }
            catch (Exception ex)
            {
                if (newImage != null)
                {
                    newImage.Dispose();
                }

                throw new ImageProcessingException("Error processing image with " + this.GetType().Name, ex);
            }

            return image;
        }
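In the ImageProcessor library this processor is normally reached through the fluent ImageFactory API rather than invoked directly. A sketch of that entry point (overload shapes may differ between versions):

        using (ImageFactory imageFactory = new ImageFactory(preserveExifData: true))
        {
            imageFactory.Load(@"input.jpg")
                        .DetectEdges(new SobelEdgeFilter(), true) // true => greyscale
                        .Save(@"output.jpg");
        }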
        /// <summary>
        /// Converts the distortion operator from the spatial domain to the frequency domain. The dimensions do not change.
        /// </summary>
        /// <param name="filter">the distortion operator PSF (Point Spread Function)</param>
        /// <returns>OTF (Optical Transfer Function)</returns>
        public static Complex[,] Psf2otf(ConvolutionFilter filter)
        {
            double[,] filterMatrix = filter.normalizedFilterMatrix;
            int FilterSize = filterMatrix.GetLength(0);
            int halfSize = (FilterSize - 1) / 2;
            int ost = FilterSize - halfSize;
            double[,] newFilter = new double[FilterSize, FilterSize];
            //+ + -
            //+ + -
            //- - -
            for (int i = 0; i < ost; i++)
                for (int j = 0; j < ost; j++)
                    newFilter[i, j] = filterMatrix[i + halfSize, j + halfSize];
            //- - +
            //- - +
            //- - -
            for (int i = 0; i < ost; i++)
                for (int j = ost; j < FilterSize; j++)
                    newFilter[i, j] = filterMatrix[i + halfSize, j - ost];
            //- - -
            //- - -
            //+ + -
            for (int i = ost; i < FilterSize; i++)
                for (int j = 0; j < ost; j++)
                    newFilter[i, j] = filterMatrix[i - ost, j + halfSize];
            //- - -
            //- - -
            //- - +
            for (int i = ost; i < FilterSize; i++)
                for (int j = ost; j < FilterSize; j++)
                    newFilter[i, j] = filterMatrix[i - ost, j - ost];

            return Fourier.Transform(Converter.ToComplexMatrix(newFilter));
        }
Example #10
        // http://rest-term.com/archives/2566/
        public void ApplyFilter(BitmapData src, Rectangle rect, Point pt, ConvolutionFilter cf)
        {
            BitmapData dst = this;
            int        w   = src.width;

            byte[] srcData = src._data;
            int[]  tmpData = new int[srcData.Length];

            int r;
            int g;
            int b;
            int i;
            int j;
            int k;
            int step;
            int kStep;

            for (int y = 1 + rect.y; y < rect.y + rect.height - 1; y++)
            {
                step = y * w;
                for (int x = 1 + rect.x; x < rect.x + rect.width - 1; x++)
                {
                    r = 0;
                    g = 0;
                    b = 0;
                    i = (step + x) << 2;
                    k = 0;
                    for (int ky = -1; ky <= 1; ky++)
                    {
                        kStep = ky * w;
                        for (int kx = -1; kx <= 1; kx++)
                        {
                            j  = (kStep << 2) + (kx << 2);
                            r += (int)(srcData [i + j + 0]) * cf.matrix [k];
                            g += (int)(srcData [i + j + 1]) * cf.matrix [k];
                            b += (int)(srcData [i + j + 2]) * cf.matrix [k++];
                        }
                    }
                    // store the normalized, biased result (clamped to byte range below)
                    tmpData [i + 0] = (r / cf.divisor + cf.bias);
                    tmpData [i + 1] = (g / cf.divisor + cf.bias);
                    tmpData [i + 2] = (b / cf.divisor + cf.bias);
                    tmpData [i + 3] = 255;
                }
            }

            byte[] dstData = dst._data;
            int    len     = dstData.Length;

            for (int l = 0; l < len; l++)
            {
                int val = tmpData [l];
                // clamp
                dstData[l] = (byte)((val < 0) ? 0 : (val > 255) ? 255 : val);
            }
//			this._data = tmpData;
        }
Example #11
        /// <summary>
        /// Processes the image.
        /// </summary>
        /// <param name="factory">
        /// The current instance of the <see cref="T:ImageProcessor.ImageFactory"/> class containing
        /// the image to process.
        /// </param>
        /// <returns>
        /// The processed image from the current instance of the <see cref="T:ImageProcessor.ImageFactory"/> class.
        /// </returns>
        public Image ProcessImage(ImageFactory factory)
        {
            Bitmap newImage  = null;
            Bitmap grey      = null;
            var    image     = factory.Image;
            byte   threshold = DynamicParameter;

            try
            {
                // Detect the edges then strip out middle shades.
                grey = new ConvolutionFilter(new SobelEdgeFilter(), true).Process2DFilter(image);
                grey = new BinaryThreshold(threshold).ProcessFilter(grey);

                // Search for the first white pixels
                var rectangle = ImageMaths.GetFilteredBoundingRectangle(grey, 0);
                grey.Dispose();

                newImage = new Bitmap(rectangle.Width, rectangle.Height, PixelFormat.Format32bppPArgb);
                newImage.SetResolution(image.HorizontalResolution, image.VerticalResolution);
                using (var graphics = Graphics.FromImage(newImage))
                {
                    graphics.DrawImage(
                        image,
                        new Rectangle(0, 0, rectangle.Width, rectangle.Height),
                        rectangle.X,
                        rectangle.Y,
                        rectangle.Width,
                        rectangle.Height,
                        GraphicsUnit.Pixel);
                }

                // Reassign the image.
                image.Dispose();
                image = newImage;

                if (factory.PreserveExifData && factory.ExifPropertyItems.Any())
                {
                    // Set the width EXIF data.
                    factory.SetPropertyItem(ExifPropertyTag.ImageWidth, (ushort)image.Width);

                    // Set the height EXIF data.
                    factory.SetPropertyItem(ExifPropertyTag.ImageHeight, (ushort)image.Height);
                }
            }
            catch (Exception ex)
            {
                grey?.Dispose();

                newImage?.Dispose();

                throw new ImageProcessingException("Error processing image with " + GetType().Name, ex);
            }

            return image;
        }
        /// <summary>
        /// Processes the image.
        /// </summary>
        /// <param name="factory">
        /// The current instance of the <see cref="T:ImageProcessor.ImageFactory"/> class containing
        /// the image to process.
        /// </param>
        /// <returns>
        /// The processed image from the current instance of the <see cref="T:ImageProcessor.ImageFactory"/> class.
        /// </returns>
        public Image ProcessImage(ImageFactory factory)
        {
            Bitmap newImage  = null;
            Bitmap grey      = null;
            Image  image     = factory.Image;
            byte   threshold = this.DynamicParameter;

            try
            {
                // Detect the edges then strip out middle shades.
                grey = new ConvolutionFilter(new SobelEdgeFilter(), true).Process2DFilter(image);
                grey = new BinaryThreshold(threshold).ProcessFilter(grey);

                // Search for the first white pixels
                Rectangle rectangle = ImageMaths.GetFilteredBoundingRectangle(grey, 0);
                grey.Dispose();

                newImage = new Bitmap(rectangle.Width, rectangle.Height);
                newImage.SetResolution(image.HorizontalResolution, image.VerticalResolution);
                using (Graphics graphics = Graphics.FromImage(newImage))
                {
                    graphics.DrawImage(
                        image,
                        new Rectangle(0, 0, rectangle.Width, rectangle.Height),
                        rectangle.X,
                        rectangle.Y,
                        rectangle.Width,
                        rectangle.Height,
                        GraphicsUnit.Pixel);
                }

                // Reassign the image.
                image.Dispose();
                image = newImage;
            }
            catch (Exception ex)
            {
                if (grey != null)
                {
                    grey.Dispose();
                }

                if (newImage != null)
                {
                    newImage.Dispose();
                }

                throw new ImageProcessingException("Error processing image with " + this.GetType().Name, ex);
            }

            return image;
        }
        /// <summary>
        /// Tikhonov regularization
        /// </summary>
        /// <param name="filter">the distortion kernel (PSF)</param>
        /// <returns></returns>
        public static ConvolutionFilter Filtering(ConvolutionFilter filter)
        {
            /// In the frequency domain:
            /// fn(u,v) = (h*(u,v) / (|h(u,v)|^2 + gamma*|p(u,v)|^2)) * g(u,v)
            /// fn - the restored estimate
            /// h - the kernel
            /// h* - the complex conjugate of the kernel
            /// |h|^2 = h(u,v) * h*(u,v)
            /// gamma - the regularization parameter (0 in the inverse filter)
            /// p(u,v) = the Laplace operator = [{0  1  0}
            ///                                  {1 -4  1}
            ///                                  {0  1  0}]
            /// g - the distorted image

            Complex[,] otf = OpticalTransferFunction.Psf2otf(filter);
            int height = otf.GetLength(0);                               // rows
            int width = otf.GetLength(1);                                // columns
            Complex gamma = Complex.Zero;                                // regularization parameter (zero => inverse filter)
            Complex[,] otfZ = new Complex[height, width];                // complex conjugate of the kernel matrix
            Complex[,] otf2 = new Complex[height, width];                // matrix = |h|^2
            Complex[,] p = {{0, 1, 0,},                                  // Laplacian
                           {1, -4, 1,},
                           {0, 1, 0,},};
            p = Fourier.Transform(p);
            for (int u = 0; u < p.GetLength(0); u++)
                for (int v = 0; v < p.GetLength(1); v++)
                    p[u, v] = OpticalTransferFunction.ModPow(p[u, v]);

            for (int u = 0; u < height; u++)
                for (int v = 0; v < width; v++)
                    otfZ[u, v] = Complex.Conjugate(otf[u, v]);

            for (int u = 0; u < height; u++)
                for (int v = 0; v < width; v++)
                    otf2[u, v] = OpticalTransferFunction.ModPow(otf[u, v]);

            for (int u = 0; u < height; u++)
                for (int v = 0; v < width; v++)
                    p[u, v] = p[u, v] * gamma;

            for (int u = 0; u < height; u++)
                for (int v = 0; v < width; v++)
                    otf2[u, v] = otf2[u, v] + p[u, v];

            for (int u = 0; u < height; u++)
                for (int v = 0; v < width; v++)
                    otf[u, v] = otfZ[u, v] / otf2[u, v];

            ConvolutionFilter cf = OpticalTransferFunction.Otf2psf(otf);

            return cf;
        }
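Since gamma is fixed to Complex.Zero here, this currently degenerates to the inverse filter; with a positive gamma the Laplacian term damps noise amplification. A usage sketch under the same assumptions as the neighbouring examples (the TikhonovFiltering host class name and distortedImage are hypothetical; Filters.Gaussian3x3BlurFilter and FastConvolution appear in Example #16 below):

            // Sketch: build a restoring kernel for a known blur PSF and deconvolve
            ConvolutionFilter blurKernel      = Filters.Gaussian3x3BlurFilter;
            ConvolutionFilter restoringFilter = TikhonovFiltering.Filtering(blurKernel);
            Image restored = restoringFilter.FastConvolution(distortedImage);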
Example #14
 static Filter ReadConcreteFilter(ConvolutionFilter filter, SwfStreamReader reader)
 {
     filter.MatrixX = reader.ReadByte();
     filter.MatrixY = reader.ReadByte();
     filter.Divisor = reader.ReadFloat32();
     filter.Bias    = reader.ReadFloat32();
     filter.Matrix  = new float[filter.MatrixX * filter.MatrixY];
     for (var i = 0; i < filter.Matrix.Length; ++i)
     {
         filter.Matrix[i] = reader.ReadFloat32();
     }
     filter.DefaultColor = SwfColor.Read(reader, true);
     reader.ReadUnsignedBits(6);             // reserved
     filter.Clamp         = reader.ReadBit();
     filter.PreserveAlpha = reader.ReadBit();
     return filter;
 }
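The field order read here (MatrixX, MatrixY, Divisor, Bias, the MatrixX×MatrixY floats, RGBA default color, 6 reserved bits, the clamp bit, the preserve-alpha bit) is the SWF CONVOLUTIONFILTER record layout. The symmetric writer is a straight mirror; a sketch, with the SwfStreamWriter method names assumed by analogy with the reader:

 static void WriteConcreteFilter(ConvolutionFilter filter, SwfStreamWriter writer)
 {
     writer.WriteByte(filter.MatrixX);
     writer.WriteByte(filter.MatrixY);
     writer.WriteFloat32(filter.Divisor);
     writer.WriteFloat32(filter.Bias);
     for (var i = 0; i < filter.Matrix.Length; ++i)
     {
         writer.WriteFloat32(filter.Matrix[i]);
     }
     SwfColor.Write(writer, filter.DefaultColor, true); // assumed counterpart of SwfColor.Read
     writer.WriteUnsignedBits(0, 6);                    // reserved
     writer.WriteBit(filter.Clamp);
     writer.WriteBit(filter.PreserveAlpha);
 }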
        /// <summary>
        /// Inverse filtering
        /// </summary>
        /// <param name="sourceImage">the distorted image</param>
        /// <param name="filter">the distortion operator (PSF)</param>
        /// <param name="outfilter">the restoring filter</param>
        /// <returns></returns>
        public static Image Filtering(Image sourceImage, ConvolutionFilter filter, out ConvolutionFilter outfilter)
        {
            // convert the PSF to the frequency domain (OTF)
            Complex[,] otf = OpticalTransferFunction.Psf2otf(filter);
            // obtain the inverse operator by inverting the OTF element-wise
            for (int u = 0; u < otf.GetLength(0); u++)
                for (int v = 0; v < otf.GetLength(1); v++)
                {
                    otf[u, v] = 1f / otf[u, v];
                }
            // convert the OTF back to the spatial domain (PSF)
            outfilter = OpticalTransferFunction.Otf2psf(otf);
            // fast convolution of the image with the inverse PSF
            Image result = outfilter.FastConvolution(sourceImage);

            return result;
        }
Example #16
 /// <summary>
 /// Initializes a new instance of the <see cref="MainForm"/> class.
 /// </summary>
 public MainForm()
 {
     InitializeComponent();
     sourcePictureBox.Image = Program.sourceImage;
     processingThread       = new Thread(() =>
     {
          // blur the image with a 3x3 Gaussian blur
          SetStatus("Blurring...");
         ConvolutionFilter blur = Filters.Gaussian3x3BlurFilter;
         Image bluredImage      = Program.sourceImage.FastConvolution(blur);
         ChangeImage(bluredPictureBox, bluredImage);
          // restore with the inverse filter
          SetStatus("Reconstructing...");
         Image reconstucted = InverseFiltering.Filtering(bluredImage, blur);
         ChangeImage(reconstructedPictureBox, reconstucted);
          // compute the deviations
         SetStatus("Calculating color deviation...");
         double standardColorDeviationValue = Program.sourceImage.ColorDeviation(reconstucted);
         ChangeText(standardColorDeviation, standardColorDeviationValue.ToString());
         SetStatus("Calculating transition deviation...");
         double standardTransitionDeviationValue = Program.sourceImage.TransitionDeviation(reconstucted);
         ChangeText(standardTransitionDeviation, standardTransitionDeviationValue.ToString());
          // restore with the modified algorithm
          SetStatus("Reconstructing by modified algorithm...");
          // expand the image so the convolution result does not come out smaller
         Image expendedBluredImage = bluredImage.Expand((blur.filterMatrix.GetLength(0) - 1) / 2 + 1);
          // recover the fractional parts by interpolation
         double[,,] interpolated = expendedBluredImage.Interpolate();
          // restore with the same algorithm as before (inverse filtering)
         Image modifiedReconstucted = InverseFiltering.Filtering(interpolated, blur);
         ChangeImage(modifiedReconstructedPictureBox, modifiedReconstucted);
          // compute the deviations
         SetStatus("Calculating color deviation...");
         double modifiedColorDeviationValue = Program.sourceImage.ColorDeviation(modifiedReconstucted);
         double colorDeviationPercents      = Percents(standardColorDeviationValue, modifiedColorDeviationValue);
         ChangeText(modifiedColorDeviation, $"{modifiedColorDeviationValue} \t{colorDeviationPercents: 0.000}% better");
         SetStatus("Calculating transition deviation...");
         double modifiedTransitionDeviationValue = Program.sourceImage.TransitionDeviation(modifiedReconstucted);
          double transitionDeviationPercents      = Percents(standardTransitionDeviationValue, modifiedTransitionDeviationValue);
          ChangeText(modifiedTransitionDeviation, $"{modifiedTransitionDeviationValue} \t{transitionDeviationPercents: 0.000}% better");
          // show the final status
         SetStatus("Complete!");
     });
     processingThread.Start();
 }
Example #17
        public static void AreEqual(ConvolutionFilter expected, ConvolutionFilter actual, string message)
        {
            Assert.AreEqual(expected.Divisor, actual.Divisor);
            Assert.AreEqual(expected.Bias, actual.Bias);

            Assert.AreEqual(expected.MatrixX, actual.MatrixX);
            Assert.AreEqual(expected.MatrixY, actual.MatrixY);
            for (var x = 0; x < actual.MatrixX; x++) {
                for (var y = 0; y < actual.MatrixX; y++) {
                    Assert.AreEqual(expected.Matrix[y, x], actual.Matrix[y, x]);
                }
            }

            AssertColors.AreEqual(expected.DefaultColor, actual.DefaultColor, "DefaultColor");
            Assert.AreEqual(expected.Reserved, actual.Reserved);
            Assert.AreEqual(expected.Clamp, actual.Clamp);
            Assert.AreEqual(expected.PreserveAlpha, actual.PreserveAlpha);
        }
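A sketch of how this assertion helper is typically driven, reusing the XML round trip from the examples above (NUnit-style; AssertFilters, XConvolutionFilter and the fixture helper are assumed names):

        [Test]
        public void XmlRoundTrip_PreservesFilter()
        {
            ConvolutionFilter expected = BuildSampleFilter(); // hypothetical fixture helper
            ConvolutionFilter actual   = XConvolutionFilter.FromXml(XConvolutionFilter.ToXml(expected));
            AssertFilters.AreEqual(expected, actual, "XML round trip");
        }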
Example #18
        /// <summary>
        /// Traces the edges of a given <see cref="Image"/>.
        /// </summary>
        /// <param name="source">
        /// The source <see cref="Image"/>.
        /// </param>
        /// <param name="destination">
        /// The destination <see cref="Image"/>.
        /// </param>
        /// <param name="threshold">
        /// The threshold (between 0 and 255).
        /// </param>
        /// <returns>
        /// The a new instance of <see cref="Bitmap"/> traced.
        /// </returns>
        public static Bitmap Trace(Image source, Image destination, byte threshold = 0)
        {
            int width  = source.Width;
            int height = source.Height;

            // Grab the edges converting to greyscale, and invert the colors.
            var filter = new ConvolutionFilter(new SobelEdgeFilter(), true);

            using (Bitmap temp = filter.Process2DFilter(source))
            {
                destination = new InvertMatrixFilter().TransformImage(temp, destination);

                // Darken it slightly to aid detection
                destination = Adjustments.Brightness(destination, -5);
            }

            // Loop through and replace any colors more white than the threshold
            // with a transparent one.
            using (var destinationBitmap = new FastBitmap(destination))
            {
                Parallel.For(
                    0,
                    height,
                    y =>
                {
                    for (int x = 0; x < width; x++)
                    {
                        // ReSharper disable AccessToDisposedClosure
                        Color color = destinationBitmap.GetPixel(x, y);
                        if (color.B >= threshold)
                        {
                            destinationBitmap.SetPixel(x, y, Color.Transparent);
                        }
                        // ReSharper restore AccessToDisposedClosure
                    }
                });
            }

            // Darken it again to average out the color.
            destination = Adjustments.Brightness(destination, -5);

            return (Bitmap)destination;
        }
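A usage sketch for Trace (EdgeTracer is the assumed hosting class; any bitmap of matching size works as the destination, and Trace returns the final traced surface):

        using (Image source = Image.FromFile("photo.png"))
        {
            Bitmap traced = EdgeTracer.Trace(source, new Bitmap(source.Width, source.Height), 128);
            traced.Save("traced.png");
            traced.Dispose();
        }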
Example #19
        public static void AreEqual(ConvolutionFilter expected, ConvolutionFilter actual, string message)
        {
            Assert.AreEqual(expected.Divisor, actual.Divisor);
            Assert.AreEqual(expected.Bias, actual.Bias);

            Assert.AreEqual(expected.MatrixX, actual.MatrixX);
            Assert.AreEqual(expected.MatrixY, actual.MatrixY);
            for (var x = 0; x < actual.MatrixX; x++)
            {
                for (var y = 0; y < actual.MatrixX; y++)
                {
                    Assert.AreEqual(expected.Matrix[y, x], actual.Matrix[y, x]);
                }
            }

            AssertColors.AreEqual(expected.DefaultColor, actual.DefaultColor, "DefaultColor");
            Assert.AreEqual(expected.Reserved, actual.Reserved);
            Assert.AreEqual(expected.Clamp, actual.Clamp);
            Assert.AreEqual(expected.PreserveAlpha, actual.PreserveAlpha);
        }
Example #20
        /// <summary>
        /// Processes the image.
        /// </summary>
        /// <param name="factory">
        /// The current instance of the <see cref="T:ImageProcessor.ImageFactory"/> class containing
        /// the image to process.
        /// </param>
        /// <returns>
        /// The processed image from the current instance of the <see cref="T:ImageProcessor.ImageFactory"/> class.
        /// </returns>
        public Image ProcessImage(ImageFactory factory)
        {
            var image = factory.Image;
            Tuple <IEdgeFilter, bool> parameters = DynamicParameter;
            var filter    = parameters.Item1;
            var greyscale = parameters.Item2;

            try
            {
                var convolutionFilter = new ConvolutionFilter(filter, greyscale);

                // Check and assign the correct method. Don't use reflection for speed.
                return filter is I2DEdgeFilter
                    ? convolutionFilter.Process2DFilter((Bitmap)image)
                    : convolutionFilter.ProcessFilter((Bitmap)image);
            }
            catch (Exception ex)
            {
                throw new ImageProcessingException("Error processing image with " + GetType().Name, ex);
            }
        }
Example #21
    public static void Convolve1D(ImageMap dest, ConvLinearMask mask,
                                  ImageMap src, Direction dir)
    {
        int maxN;
        int maxP;

        if (dir == Direction.Vertical)
        {
            maxN = src.XDim;
            maxP = src.YDim;
        }
        else if (dir == Direction.Horizontal)
        {
            maxN = src.YDim;
            maxP = src.XDim;
        }
        else
        {
            throw new ArgumentException("Invalid direction", nameof(dir));
        }

        for (int n = 0; n < maxN; ++n)
        {
            for (int p = 0; p < maxP; ++p)
            {
                double val = ConvolutionFilter.CalculateConvolutionValue1D(src, mask,
                                                                           n, p, maxN, maxP, dir);

                if (dir == Direction.Vertical)
                {
                    dest[n, p] = val;
                }
                else
                {
                    dest[p, n] = val;
                }
            }
        }
    }
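Because the direction is a parameter, a separable kernel such as a Gaussian can be applied as two 1D passes over a temporary map, which is the usual point of a 1D convolver. A sketch from within the same class, assuming an ImageMap(width, height) constructor and a prebuilt ConvLinearMask:

    // Sketch: separable blur = horizontal pass into tmp, then vertical pass into dst
    ImageMap tmp = new ImageMap(src.XDim, src.YDim);
    ImageMap dst = new ImageMap(src.XDim, src.YDim);
    Convolve1D(tmp, gaussianMask, src, Direction.Horizontal);
    Convolve1D(dst, gaussianMask, tmp, Direction.Vertical);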
        /// <summary>
        /// Converts the distortion operator from the spatial domain to the frequency domain with a new (larger) dimension.
        /// </summary>
        /// <param name="filter">the distortion operator at its original size</param>
        /// <param name="newSize">the new dimension</param>
        /// <returns></returns>
        public static Complex[,] Psf2otf(ConvolutionFilter filter, int newSize)
        {
            double[,] filterMatrix = filter.normalizedFilterMatrix;
            int sourceFilterSize = filterMatrix.GetLength(0);
            int halfSize = (filter.filterMatrix.GetLength(0) - 1) / 2;
            if (newSize < sourceFilterSize)
                return null;
            double[,] extendedFilter = new double[newSize, newSize];
            //0 0 0
            //0 0 0
            //0 0 0
            for (int i = 0; i < newSize; i++)
                for (int j = 0; j < newSize; j++)
                {
                    extendedFilter[i, j] = 0;
                }
            //- - -
            //- + +
            //- + +
            for (int i = 0; i < halfSize + 1; i++)
                for (int j = 0; j < halfSize + 1; j++)
                    extendedFilter[i, j] = filterMatrix[i + halfSize, j + halfSize];
            //- - -
            //+ - -
            //+ - -
            for (int i = 0; i < halfSize + 1; i++)
                for (int j = newSize - halfSize; j < newSize; j++)
                    extendedFilter[i, j] = filterMatrix[i + halfSize, j - (newSize - halfSize)];
            //- + +
            //- - -
            //- - -
            for (int i = newSize - halfSize; i < newSize; i++)
                for (int j = 0; j < halfSize + 1; j++)
                    extendedFilter[i, j] = filterMatrix[i - (newSize - halfSize), j + halfSize];
            //+ - -
            //- - -
            //- - -
            for (int i = newSize - halfSize; i < newSize; i++)
                for (int j = newSize - halfSize; j < newSize; j++)
                    extendedFilter[i, j] = filterMatrix[i - (newSize - halfSize), j - (newSize - halfSize)];

            return Fourier.Transform(Converter.ToComplexMatrix(extendedFilter));
        }
Example #23
 public void ApplyFilter(ConvolutionFilter cf)
 {
     ApplyFilter(this, new Rectangle(0, 0, this.width, this.height), null, cf);
 }
Example #24
    void Update()
    {
        if (applicationExiting)
        {
            return;
        }

        if (cameraTexture == null || predictionTexture == null || carController == null)
        {
            return;
        }

        // Remember currently active render texture
        RenderTexture currentActiveRT = RenderTexture.active;

        // Transfer the camera capture into the prediction texture (temporarily)
        RenderTexture.active = cameraTexture;
        predictionTexture.ReadPixels(new Rect(0, 0, _inputWidth, _inputHeight), 0, 0);
        predictionTexture.Apply();

        // Restore active render texture
        RenderTexture.active = currentActiveRT;

        // Edge Detection Convolution methods:
        // - Canny - https://en.wikipedia.org/wiki/Canny_edge_detector
        // - Laplacian of the Gaussian (LoG) - https://en.wikipedia.org/wiki/Blob_detection#The_Laplacian_of_Gaussian
        // - Sobel-Feldman and Scharr operators - https://en.wikipedia.org/wiki/Sobel_operator
        // - Prewitt operator - https://en.wikipedia.org/wiki/Prewitt_operator
        // - Kirsch operator - https://en.wikipedia.org/wiki/Kirsch_operator
        bool useSobel               = false;
        bool useCanny               = false && !useSobel;
        bool useBlur                = false && !useCanny; // Canny already includes Gaussian blurring
        bool useThresholding        = false;
        bool useGaborFilter         = false;
        bool useLineSegmentDetector = true;
        bool useFastFeatureDetector = !useLineSegmentDetector;

        // Blur entire camera image?
        if (useBlur)
        {
            Texture2D blurredTexture = ConvolutionFilter.Apply(predictionTexture, ConvolutionFilter.GaussianBlur);
            predictionTexture.SetPixels(blurredTexture.GetPixels());
        }

        // Convert from RGB space to Y'UV (ignoring chrominance)
        Color actualPixel = new Color();
        Color yuvPixel    = new Color();

        for (int x = 0; x < _inputWidth; x++)
        {
            for (int y = 0; y < _inputHeight; y++)
            {
                actualPixel = predictionTexture.GetPixel(x, y);

                // SDTV (BT.601) Y'UV conversion
                yuvPixel.r = actualPixel.r * 0.299f + actualPixel.g * 0.587f + actualPixel.b * 0.114f;   // Y' luma component

                // Chrominance
                // U = r * -0.14713 + g * -0.28886 + b * 0.436
                yuvPixel.g = 0.0f;
                // V = r * 0.615 + g * -0.51499 + b * -0.10001
                yuvPixel.b = 0.0f;

                predictionTexture.SetPixel(x, y, yuvPixel);
            }
        }

        int pixelPos;

        // Extract a portion of the camera image (half height)
        int yOffset = 16;   // Set to 0 for bottom half, _hiddenHeight for top half
        int yHeight = _hiddenHeight;

        for (int y = yOffset; y < yOffset + yHeight; y++)
        {
            for (int x = 0; x < _hiddenWidth; x++)
            {
                pixelPos = ((y - yOffset) * _hiddenWidth) + x;
                _inputField[pixelPos] = predictionTexture.GetPixel(x, y).r;
            }
        }

        if (useGaborFilter)
        {
            _openCV.GaborFilter(_inputField, 5, 4.0f, 0.0f, 10.0f, 0.5f, 0.0f);

            Color tempPixel = new Color(0.0f, 0.0f, 0.0f);
            for (int y = 0; y < yHeight; y++)
            {
                for (int x = 0; x < _hiddenWidth; x++)
                {
                    pixelPos    = (y * _hiddenWidth) + x;
                    tempPixel.r = _inputField[pixelPos];
                    tempPixel.g = tempPixel.r;
                    tempPixel.b = tempPixel.r;
                    predictionTexture.SetPixel(x, y + yHeight, tempPixel);
                }
            }
            predictionTexture.Apply();
        }

        if (useThresholding)
        {
            //_openCV.Threshold(_inputField, 0.0f, 255.0f,
            //    eogmaneo.OpenCVInterop.CV_THRESH_TOZERO | eogmaneo.OpenCVInterop.CV_THRESH_OTSU);

            _openCV.AdaptiveThreshold(_inputField, 255.0f,
                                      eogmaneo.OpenCVInterop.CV_ADAPTIVE_THRESH_GAUSSIAN_C,
                                      eogmaneo.OpenCVInterop.CV_THRESH_BINARY,
                                      5, 2);

            Color tempPixel = new Color(0.0f, 0.0f, 0.0f);
            for (int y = 0; y < yHeight; y++)
            {
                for (int x = 0; x < _hiddenWidth; x++)
                {
                    pixelPos    = (y * _hiddenWidth) + x;
                    tempPixel.r = _inputField[pixelPos];
                    tempPixel.g = tempPixel.r;
                    tempPixel.b = tempPixel.r;
                    predictionTexture.SetPixel(x, y + yHeight, tempPixel);
                }
            }
            predictionTexture.Apply();
        }

        if (useCanny)
        {
            _openCV.CannyEdgeDetection(_inputField, 50.0f, 50.0f * 3.0f);

            Color tempPixel = new Color(0.0f, 0.0f, 0.0f);
            for (int y = 0; y < yHeight; y++)
            {
                for (int x = 0; x < _hiddenWidth; x++)
                {
                    pixelPos    = (y * _hiddenWidth) + x;
                    tempPixel.r = _inputField[pixelPos];
                    tempPixel.g = tempPixel.r;
                    tempPixel.b = tempPixel.r;
                    predictionTexture.SetPixel(x, y + yHeight, tempPixel);
                }
            }
            predictionTexture.Apply();
        }

        if (useSobel)
        {
            // Make sure that Sobel input and output uses a signed pixel data type,
            // e.g. convert after to 8-bit unsigned
            // sobelx64f = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize = 5)
            // abs_sobel64f = np.absolute(sobelx64f)
            // sobel_8u = np.uint8(abs_sobel64f)

            Texture2D horzTexture = ConvolutionFilter.Apply(predictionTexture, ConvolutionFilter.Sobel3x3Horizontal);
            Texture2D vertTexture = ConvolutionFilter.Apply(predictionTexture, ConvolutionFilter.Sobel3x3Vertical);

            Texture2D convolvedTexture = new Texture2D(_inputWidth, _inputHeight, predictionTexture.format, false);
            Color     tempPixel        = new Color(0.0f, 0.0f, 0.0f);

            for (int y = yOffset; y < yOffset + yHeight; y++)
            {
                for (int x = 0; x < _hiddenWidth; x++)
                {
                    Color horzPixel = horzTexture.GetPixel(x, y);
                    Color vertPixel = vertTexture.GetPixel(x, y);

                    tempPixel.r = Mathf.Sqrt((horzPixel.r * horzPixel.r) + (vertPixel.r * vertPixel.r));
                    tempPixel.g = tempPixel.r; // Mathf.Sqrt((horzPixel.g * horzPixel.g) + (vertPixel.g * vertPixel.g));
                    tempPixel.b = tempPixel.r; // Mathf.Sqrt((horzPixel.b * horzPixel.b) + (vertPixel.b * vertPixel.b));

                    convolvedTexture.SetPixel(x, (y - yOffset) + _hiddenHeight, tempPixel);

                    pixelPos = ((y - yOffset) * _hiddenWidth) + x;
                    _inputField[pixelPos] = (int)(tempPixel.r * 255.0f);
                }
            }

            predictionTexture.SetPixels(convolvedTexture.GetPixels());
            predictionTexture.Apply();
        }

        if (useLineSegmentDetector)
        {
            // Pass filtered image into the Line Segment Detector (optionally drawing found lines),
            // and construct the rotation SDR for passing into the hierarchy
            bool drawLines = true;
            _openCV.LineSegmentDetector(_inputField, _hiddenWidth, _hiddenHeight, 6, _rotationSDR, drawLines);

            if (drawLines)
            {
                // With drawLines enabled, the _inputField gets overwritten with black background
                // pixels and the detected white lines drawn on top.

                // Transfer back into the predictionTexture for display (top half, bottom will show SDRs)
                Color tempPixel = new Color(0.0f, 0.0f, 0.0f);
                for (int y = yOffset; y < yOffset + yHeight; y++)
                {
                    for (int x = 0; x < _hiddenWidth; x++)
                    {
                        pixelPos    = ((y - yOffset) * _hiddenWidth) + x;
                        tempPixel.r = _inputField[pixelPos];
                        tempPixel.g = tempPixel.r;
                        tempPixel.b = tempPixel.r;
                        predictionTexture.SetPixel(x, (y - yOffset) + _hiddenHeight, tempPixel);
                    }
                }
                predictionTexture.Apply();
            }
        }

        if (useFastFeatureDetector)
        {
            // Pass filtered image into the FAST Feature Detector (optionally drawing points found),
            // and construct the feature SDR for passing into the hierarchy
            bool drawPoints = true;
            _openCV.FastFeatureDetector(_inputField, _hiddenWidth, _hiddenHeight, 6, _rotationSDR, drawPoints, 0, 1, true);

            if (drawPoints)
            {
                // With drawPoints enabled, the _inputField gets overwritten with black background
                // pixels and the detected white points drawn on top.

                // Transfer back into the predictionTexture for display (top half, bottom will show SDRs)
                Color tempPixel = new Color(0.0f, 0.0f, 0.0f);
                for (int y = yOffset; y < yOffset + yHeight; y++)
                {
                    for (int x = 0; x < _hiddenWidth; x++)
                    {
                        pixelPos    = ((y - yOffset) * _hiddenWidth) + x;
                        tempPixel.r = _inputField[pixelPos];
                        tempPixel.g = tempPixel.r;
                        tempPixel.b = tempPixel.r;
                        predictionTexture.SetPixel(x, (y - yOffset) + _hiddenHeight, tempPixel);
                    }
                }
                predictionTexture.Apply();
            }
        }

        Color predictedPixel = new Color();

        // Plot pre-encoder SDR output just underneath the input filtered image
        int onState = 0;

        for (int y = 16; y < 32; y++)
        {
            for (int x = 0; x < _inputWidth; x++)
            {
                if (x < _rotationSDR.Count)
                {
                    predictedPixel.r = _rotationSDR[x];

                    if (y == 16)
                    {
                        onState += (int)predictedPixel.r;
                    }
                }
                else
                {
                    predictedPixel.r = 0.0f;
                }

                predictedPixel.g = predictedPixel.r;
                predictedPixel.b = predictedPixel.r;

                predictionTexture.SetPixel(x, y, predictedPixel);
            }
        }

        // Plot predicted SDR output at the bottom
        int ccState = 0;

        for (int y = 0; y < 16; y++)
        {
            for (int x = 0; x < _inputWidth; x++)
            {
                if (x < _rotationSDR.Count)
                {
                    predictedPixel.r = _predictedSDR[x];

                    if (y == 0)
                    {
                        ccState += _rotationSDR[x] & _predictedSDR[x];
                    }
                }
                else
                {
                    predictedPixel.r = 0.0f;
                }

                predictedPixel.g = predictedPixel.r;
                predictedPixel.b = predictedPixel.r;

                predictionTexture.SetPixel(x, y, predictedPixel);
            }
        }
        predictionTexture.Apply();

        _onStates.Add(onState);
        _ccStates.Add(ccState);

        // Trim lists?
        if (_onStates.Count > _maxNumStates)
        {
            _onStates.RemoveAt(0);
            _ccStates.RemoveAt(0);
        }

        NCC = 0.0f;
        for (int i = 0; i < _onStates.Count; i++)
        {
            if (_ccStates[i] == 0 && _onStates[i] == 0)
            {
                NCC += 1.0f;
            }
            else if (_onStates[i] == 0)
            {
                NCC += 1.0f;
            }
            else
            {
                NCC += (float)_ccStates[i] / (float)_onStates[i];
            }
        }
        NCC /= (float)_onStates.Count;

        // Encode scalar values from the car controller
        Steer = carController.CurrentSteerAngle / carController.m_MaximumSteerAngle;
        Accel = carController.AccelInput;
        Brake = carController.BrakeInput;

        //for (int i = 0; i < 6 * 6; i++)
        //    _inputValues[i] = 0;
        //int index = (int)((Steer * 0.5f + 0.5f) * (6 * 6 - 1) + 0.5f);
        //_inputValues[index] = 1;

        _inputValues[0] = (int)((Steer * 0.5f + 0.5f) * (6.0f * 6.0f - 1.0f) + 0.5f);

        // Setup the hierarchy input vector
        Std2DVeci input = new Std2DVeci();

        input.Add(_rotationSDR);
        input.Add(_inputValues);

        // Step the hierarchy
        _hierarchy.step(input, _system, Training);

        StdVeci predictions = _hierarchy.getPrediction(0);

        for (int i = 0; i < _predictedSDR.Count; i++)
        {
            _predictedSDR[i] = predictions[i];
        }

        // Wait for physics to settle
        if (_time < 1.0f)
        {
            _time += Time.deltaTime;

            // Apply hand brake
            carSteer  = 0.0f;
            carAccel  = 0.0f;
            carBrake  = -1.0f;
            HandBrake = 1.0f;
        }
        else
        {
            // Release hand brake
            HandBrake = 0.0f;

            Accel = -1.0f;
            Brake = Accel;

            // Update the car controller

            StdVeci steeringPredictions = _hierarchy.getPrediction(1);

            //int maxIndex = 0;
            //for (int i = 1; i < 6 * 6; i++)
            //    if (steeringPredictions[i] > steeringPredictions[maxIndex])
            //        maxIndex = i;
            //PredictedSteer = (float)(maxIndex) / (float)(6 * 6 - 1) * 2.0f - 1.0f;
            PredictedSteer = (steeringPredictions[0] / (6.0f * 6.0f - 1.0f)) * 2.0f - 1.0f;

            PredictedAccel = Accel;
            PredictedBrake = Brake;

            carSteer = PredictedSteer;
            carAccel = PredictedAccel;
            carBrake = PredictedBrake;

            // Search along the spline for the closest point to the current car position
            float   bestT = 0.0f, minDistance = 100000.0f;
            Vector3 carPosition = carController.gameObject.transform.localPosition;

            // When not training use the track spline
            BezierSpline spline = trackSpline;

            if (Training)
            {
                spline = splineList[SplineIndex];
            }

            float totalDistance = 0.0f;

            for (float t = 0.0f; t <= 1.0f; t += 0.001f)
            {
                Vector3 position     = spline.GetPoint(t);
                Vector3 positionPrev = spline.GetPoint(t - 0.001f);

                float distance = Vector3.Distance(position, carPosition);

                totalDistance += Vector3.Distance(position, positionPrev);

                if (distance <= minDistance)
                {
                    minDistance = distance;
                    bestT       = t;
                }
            }

            // Assume +-2 units is maximum distance the car is allowed to be from the center spline
            NCC = Mathf.Max(0.0f, NCC - (1.0f - ((2.0f - Vector3.Distance(carPosition, spline.GetPoint(bestT))) / 2.0f)));
            //NCC = ((2.0f - Vector3.Distance(carPosition, spline.GetPoint(bestT))) / 2.0f);

            // Reset car position and direction?
            if (Input.GetKeyUp(KeyCode.R) || carController.Collided)
            {
                if (ForcePredictionMode == false)
                {
                    Training = true;
                }

                carController.ResetCollided();

                // Spline 0 is usually set as the spline used to create the track
                SplineIndex = 0;

                Vector3 position = spline.GetPoint(bestT);
                position.y = carController.gameObject.transform.localPosition.y;
                carController.gameObject.transform.localPosition = position;

                Vector3 splineDirection = spline.GetDirection(bestT).normalized;
                carController.gameObject.transform.forward = -splineDirection;
            }

            // Toggle training on if too divergent?
            if (Training == false && ForcePredictionMode == false && NCC < 0.25f)
            {
                Training = true;
            }

            // Toggle training off if quite confident?
            if (Training == true && NCC > 0.85f && LapCount >= initialTrainingLaps)
            {
                Training = false;
            }

            if (carController.CurrentSpeed < 2.0f)
            {
                Training = true;
            }

            if (Training)
            {
                _trainingCount++;
            }
            else
            {
                _predictingCount++;
            }

            if (Training && spline != null)
            {
                Vector3 carDirection = -carController.gameObject.transform.forward.normalized;

                Vector3 targetPosition = spline.GetPoint(bestT + (SteerAhead / totalDistance));

                //Vector3 splineDirection = spline.GetDirection(bestT).normalized;

                Vector3 targetDirection = (targetPosition - carPosition).normalized;

                float angle = (1.0f - Vector3.Dot(carDirection, targetDirection));// * Mathf.Rad2Deg;

                Vector3 right  = Vector3.Cross(carDirection, Vector3.up);
                float   angle2 = Vector3.Dot(right, targetDirection);

                float newCarSteer = Mathf.Exp(256.0f * angle) - 1.0f;

                if (Mathf.Abs(minDistance) > 0.01f) // was: newCarSteer > Mathf.PI / 64.0f
                {
                    newCarSteer += angle2 * Mathf.Abs(minDistance);
                }

                if (angle2 > 0.0f)
                {
                    newCarSteer = -newCarSteer;
                }

                if (newCarSteer > 1.0f)
                {
                    newCarSteer = 1.0f;
                }
                else
                if (newCarSteer < -1.0f)
                {
                    newCarSteer = -1.0f;
                }

                float steerBlend = 0.5f;
                carSteer = (steerBlend * newCarSteer) + ((1.0f - steerBlend) * carSteer);

                if (enableDebugLines)
                {
                    debugLinePositions[0] = carController.gameObject.transform.localPosition;
                    debugLinePositions[1] = debugLinePositions[0] + carDirection * 10.0f;
                    debugLinePositions[2] = carController.gameObject.transform.localPosition;
                    debugLinePositions[3] = debugLinePositions[2] + targetDirection * 10.0f;
                    debugLine.SetPositions(debugLinePositions);
                }
            }

            float totalCount = _trainingCount + _predictingCount;

            if (totalCount == 0.0f)
            {
                TrainingPercent   = 1.0f;
                PredictionPercent = 0.0f;
            }
            else
            {
                TrainingPercent   = (float)_trainingCount / totalCount;
                PredictionPercent = (float)_predictingCount / totalCount;
            }

            if (bestT < prevBestT)
            {
                LapCount++;

                _trainingCount   = 0;
                _predictingCount = 0;

                if ((LapCount % lapsPerSpline) == 0)
                {
                    SplineIndex++;

                    if (SplineIndex >= splineList.Length)
                    {
                        SplineIndex = 0;
                    }
                }
            }

            prevBestT = bestT;
        }

        if (connectToNeoVis && _neoVis != null)
        {
            _neoVis.update(0.01f);
        }

        if (userControl)
        {
            // Control overrides:
            // pass the input to the car!
            float h = CrossPlatformInputManager.GetAxis("Horizontal");
            float v = CrossPlatformInputManager.GetAxis("Vertical");
#if !MOBILE_INPUT
            float handbrake = CrossPlatformInputManager.GetAxis("Jump");
#else
            float handbrake = 0f;
#endif
            carSteer  = h;
            carAccel  = v;
            carBrake  = v;
            HandBrake = handbrake;
        }

        // Toggle training mode?
        if (Input.GetKeyUp(KeyCode.T))
        {
            Training            = !Training;
            ForcePredictionMode = false;
        }
        else
        // Force prediction mode?
        if (Input.GetKeyUp(KeyCode.F))
        {
            Training            = false;
            ForcePredictionMode = true;
        }

        // Save out the current state of the hierarchy?
        if (Input.GetKeyUp(KeyCode.O) && hierarchyFileName.Length > 0)
        {
            _hierarchy.save(hierarchyFileName);
            print("Saved OgmaNeo hierarchy to " + hierarchyFileName);
        }
    }
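The NCC value that gates training above is a normalized cross-correlation between the current camera frame and the frame the hierarchy predicted on the previous step (the next example computes it in CalculateNormalizedCrossCorrelation(), which is not listed). A minimal sketch of such a helper, assuming the float[,] greyscale buffers sourceImage and predictedImage used by this class; the actual implementation may differ:

    // Sketch only (the real helper is not shown). Returns a value in
    // [-1, 1]; 1 means the prediction matches the input up to an
    // affine change in brightness and contrast.
    static float NormalizedCrossCorrelation(float[,] a, float[,] b)
    {
        int w = a.GetLength(0), h = a.GetLength(1);
        float n = w * h;

        // Per-image means
        float meanA = 0.0f, meanB = 0.0f;
        for (int x = 0; x < w; x++)
            for (int y = 0; y < h; y++)
            {
                meanA += a[x, y];
                meanB += b[x, y];
            }
        meanA /= n;
        meanB /= n;

        // Covariance and variances of the mean-removed images
        float num = 0.0f, varA = 0.0f, varB = 0.0f;
        for (int x = 0; x < w; x++)
            for (int y = 0; y < h; y++)
            {
                float da = a[x, y] - meanA;
                float db = b[x, y] - meanB;
                num  += da * db;
                varA += da * da;
                varB += db * db;
            }

        float denom = Mathf.Sqrt(varA * varB);
        return denom > 0.0f ? num / denom : 0.0f;
    }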
Exemple #25
0
    void Update()
    {
        if (applicationExiting)
        {
            return;
        }

        if (cameraTexture == null || predictionTexture == null || carController == null)
        {
            return;
        }

        ogmaneo.Vec2i pixelPos = new Vec2i();

        // Remember currently active render texture
        RenderTexture currentActiveRT = RenderTexture.active;

        // Transfer the camera capture into the prediction texture (temporarily)
        RenderTexture.active = cameraTexture;
        predictionTexture.ReadPixels(new Rect(0, 0, _inputWidth, _inputHeight), 0, 0);
        predictionTexture.Apply();

        // Restore active render texture
        RenderTexture.active = currentActiveRT;

        // Convert the captured RGB pixels to Y' luma (greyscale) in place
        Color actualPixel = new Color();
        Color yuvPixel    = new Color(0.0f, 0.0f, 0.0f);

        for (int x = 0; x < _inputWidth; x++)
        {
            for (int y = 0; y < _inputHeight; y++)
            {
                actualPixel = predictionTexture.GetPixel(x, y);

                // SDTV (BT.601) Y'UV conversion
                yuvPixel.r = actualPixel.r * 0.299f + actualPixel.g * 0.587f + actualPixel.b * 0.114f;   // Y' luma component

                // Chrominance
                // U = r * -0.14713 + g * -0.28886 + b * 0.436
                //yuvPixel.g = 0.0f;
                // V = r * 0.615 + g * -0.51499 + b * -0.10001
                //yuvPixel.b = 0.0f;

                predictionTexture.SetPixel(x, y, yuvPixel);
            }
        }

        // Edge detection convolution methods:
        // - Laplacian of the Gaussian (LoG) - https://en.wikipedia.org/wiki/Blob_detection#The_Laplacian_of_Gaussian
        // - Sobel-Feldman and Scharr operators - https://en.wikipedia.org/wiki/Sobel_operator
        // - Prewitt operator - https://en.wikipedia.org/wiki/Prewitt_operator
        // - Kirsch operator - https://en.wikipedia.org/wiki/Kirsch_operator
        Texture2D horzTexture = ConvolutionFilter.Apply(predictionTexture, ConvolutionFilter.Sobel3x3Horizontal); // ConvolutionFilter.Prewitt3x3Horizontal);
        Texture2D vertTexture = ConvolutionFilter.Apply(predictionTexture, ConvolutionFilter.Sobel3x3Vertical);   // ConvolutionFilter.Prewitt3x3Vertical);

        Texture2D convolvedTexture = new Texture2D(_inputWidth, _inputHeight, predictionTexture.format, false);
        Color     tempPixel        = new Color(0.0f, 0.0f, 0.0f);

        for (int x = 0; x < _inputWidth; x++)
        {
            for (int y = 0; y < _inputHeight; y++)
            {
                Color horzPixel = horzTexture.GetPixel(x, y);
                Color vertPixel = vertTexture.GetPixel(x, y);

                tempPixel.r = Mathf.Sqrt((horzPixel.r * horzPixel.r) + (vertPixel.r * vertPixel.r));
                tempPixel.g = tempPixel.r; // Mathf.Sqrt((horzPixel.g * horzPixel.g) + (vertPixel.g * vertPixel.g));
                tempPixel.b = tempPixel.r; // Mathf.Sqrt((horzPixel.b * horzPixel.b) + (vertPixel.b * vertPixel.b));

                convolvedTexture.SetPixel(x, y, tempPixel);
            }
        }

        predictionTexture.SetPixels(convolvedTexture.GetPixels());
        predictionTexture.Apply();

        // Transfer the edge-filtered greyscale texture into the ValueField2D input field and the history buffers
        for (int x = 0; x < _inputWidth; x++)
        {
            for (int y = 0; y < _inputHeight; y++)
            {
                actualPixel = predictionTexture.GetPixel(x, y);

                pixelPos.x = x;
                pixelPos.y = y;

                _inputField.setValue(pixelPos, actualPixel.r);

                previousImage[x, y] = sourceImage[x, y];
                sourceImage[x, y]   = actualPixel.r;// * 0.299f + actualPixel.g * 0.587f + actualPixel.b * 0.114f;
            }
        }

        // Encode scalar values from the car controller
        Steer = carController.CurrentSteerAngle / carController.m_MaximumSteerAngle;
        Accel = carController.AccelInput;
        Brake = carController.BrakeInput;

        pixelPos.x = 0;
        pixelPos.y = 0;
        _inputValues.setValue(pixelPos, Steer);

        // Setup the hierarchy input vector
        vectorvf inputVector = new vectorvf();

        inputVector.Add(_inputField);
        inputVector.Add(_inputValues);

        // Step the hierarchy
        _hierarchy.activate(inputVector);

        if (Training)
        {
            _hierarchy.learn(inputVector);
        }

        // Grab the predictions vector
        vectorvf prediction = _hierarchy.getPredictions();

        // Transfer the ValueField2D fields into the RGB prediction texture
        Color predictedPixel = new Color();

        for (int x = 0; x < _inputWidth; x++)
        {
            for (int y = 0; y < _inputHeight; y++)
            {
                pixelPos.x = x;
                pixelPos.y = y;

                predictedPixel.r = prediction[0].getValue(pixelPos);
                predictedPixel.g = predictedPixel.r; // prediction[1].getValue(pixelPos);
                predictedPixel.b = predictedPixel.r; // prediction[2].getValue(pixelPos);

                predictionTexture.SetPixel(x, y, predictedPixel);

                predictedImage[x, y] = predictedPixel.r;// * 0.299f + predictedPixel.g * 0.587f + predictedPixel.b * 0.114f;
            }
        }
        predictionTexture.Apply();

        // Wait for physics to settle
        if (_time < 1.0f)
        {
            _time += Time.deltaTime;

            // Apply hand brake
            carSteer  = 0.0f;
            carAccel  = 0.0f;
            carBrake  = -1.0f;
            HandBrake = 1.0f;
        }
        else
        {
            // Release hand brake
            HandBrake = 0.0f;

            Accel = -1.0f;
            Brake = Accel;

            pixelPos.x = 0;
            pixelPos.y = 0;

            // Update the car controller
            PredictedSteer = prediction[1].getValue(pixelPos);
            PredictedAccel = Accel;
            PredictedBrake = Brake;

            carSteer = PredictedSteer;// * carController.m_MaximumSteerAngle;
            carAccel = PredictedAccel;
            carBrake = PredictedBrake;

            // Search along the spline for the closest point to the current car position
            float   bestT = 0.0f, minDistance = 100000.0f;
            Vector3 carPosition = carController.gameObject.transform.localPosition;

            // When not training use the track spline
            BezierSpline spline = trackSpline;

            if (Training)
            {
                spline = splineList[SplineIndex];
            }

            float totalDistance = 0.0f;

            for (float t = 0.0f; t <= 1.0f; t += 0.001f)
            {
                Vector3 position     = spline.GetPoint(t);
                Vector3 positionPrev = spline.GetPoint(t - 0.001f);

                float distance = Vector3.Distance(position, carPosition);

                totalDistance += Vector3.Distance(position, positionPrev);

                if (distance <= minDistance)
                {
                    minDistance = distance;
                    bestT       = t;
                }
            }

            // Reset car position and direction?
            if (Input.GetKeyUp(KeyCode.R) || carController.Collided)
            {
                if (ForcePredictionMode == false)
                {
                    Training = true;
                }

                carController.ResetCollided();

                // Spline 0 is usually set as the spline used to create the track
                SplineIndex = 0;

                Vector3 position = spline.GetPoint(bestT);
                carController.gameObject.transform.localPosition = position;

                Vector3 splineDirection = spline.GetDirection(bestT).normalized;
                carController.gameObject.transform.forward = -splineDirection;
            }

            // Determine the difference between the input image (t) and predicted image (t+1)
            CalculateNormalizedCrossCorrelation();

            // Toggle training on if the prediction has diverged too far?
            if (Training == false && ForcePredictionMode == false && NCC < 0.25f)
            {
                Training = true;
            }

            // Toggle training off once the prediction is confident enough?
            if (Training == true && NCC > 0.85f && LapCount >= initialTrainingLaps)
            {
                Training = false;
            }

            if (carController.CurrentSpeed < 2.0f)
            {
                Training = true;
            }

            if (Training)
            {
                _trainingCount++;
            }
            else
            {
                _predictingCount++;
            }

            if (Training && spline != null)
            {
                Vector3 carDirection = -carController.gameObject.transform.forward.normalized;

                Vector3 targetPosition = spline.GetPoint(bestT + SteerAhead / totalDistance);

                //Vector3 splineDirection = spline.GetDirection(bestT).normalized;

                Vector3 targetDirection = (targetPosition - carPosition).normalized;

                float angle = (1.0f - Vector3.Dot(carDirection, targetDirection));// * Mathf.Rad2Deg;

                Vector3 right  = Vector3.Cross(carDirection, Vector3.up);
                float   angle2 = Vector3.Dot(right, targetDirection);

                float newCarSteer = Mathf.Exp(256.0f * angle) - 1.0f;

                if (Mathf.Abs(minDistance) > 0.01f) // was: newCarSteer > Mathf.PI / 64.0f
                {
                    newCarSteer += angle2 * Mathf.Abs(minDistance);
                }

                if (angle2 > 0.0f)
                {
                    newCarSteer = -newCarSteer;
                }

                if (newCarSteer > 1.0f)
                {
                    newCarSteer = 1.0f;
                }
                else
                if (newCarSteer < -1.0f)
                {
                    newCarSteer = -1.0f;
                }

                float steerBlend = 0.75f;
                carSteer = (steerBlend * newCarSteer) + ((1.0f - steerBlend) * carSteer);

                if (enableDebugLines)
                {
                    debugLinePositions[0] = carController.gameObject.transform.localPosition;
                    debugLinePositions[1] = debugLinePositions[0] + carDirection * 10.0f;
                    debugLinePositions[2] = carController.gameObject.transform.localPosition;
                    debugLinePositions[3] = debugLinePositions[2] + targetDirection * 10.0f;
                    debugLine.SetPositions(debugLinePositions);
                }
            }

            float totalCount = _trainingCount + _predictingCount;

            if (totalCount == 0.0f)
            {
                TrainingPercent   = 1.0f;
                PredictionPercent = 0.0f;
            }
            else
            {
                TrainingPercent   = (float)_trainingCount / totalCount;
                PredictionPercent = (float)_predictingCount / totalCount;
            }

            if (bestT < prevBestT)
            {
                LapCount++;

                _trainingCount   = 0;
                _predictingCount = 0;

                if ((LapCount % lapsPerSpline) == 0)
                {
                    SplineIndex++;

                    if (SplineIndex >= splineList.Length)
                    {
                        SplineIndex = 0;
                    }
                }
            }

            prevBestT = bestT;
        }

        if (userControl)
        {
            // Control overrides:
            // pass the input to the car!
            float h = CrossPlatformInputManager.GetAxis("Horizontal");
            float v = CrossPlatformInputManager.GetAxis("Vertical");
#if !MOBILE_INPUT
            float handbrake = CrossPlatformInputManager.GetAxis("Jump");
#else
            float handbrake = 0f;
#endif
            carSteer  = h;
            carAccel  = v;
            carBrake  = v;
            HandBrake = handbrake;
        }

        // Toggle training?
        if (Input.GetKeyUp(KeyCode.T))
        {
            Training            = !Training;
            ForcePredictionMode = false;
        }
        else
        // Force prediction mode?
        if (Input.GetKeyUp(KeyCode.F))
        {
            Training            = false;
            ForcePredictionMode = true;
        }

        // Save out the current state of the hierarchy?
        if (Input.GetKeyUp(KeyCode.O) && hierarchyFileName.Length > 0)
        {
            _hierarchy.save(_res.getComputeSystem(), hierarchyFileName);
            print("Saved OgmaNeo hierarchy to " + hierarchyFileName);
        }
    }
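ConvolutionFilter.Apply and the Sobel3x3Horizontal/Sobel3x3Vertical kernels used for edge detection above are not listed. A sketch of what they plausibly look like (the class name, the clamped border handling, and the greyscale-only sampling are assumptions):

    // Sketch of a 3x3 convolution over a Texture2D, as assumed by the
    // calls above. Standard Sobel kernels, row-major. Border pixels are
    // handled by clamping sample coordinates to the texture bounds.
    public static class ConvolutionFilterSketch
    {
        public static readonly float[] Sobel3x3Horizontal =
            { -1, 0, 1,  -2, 0, 2,  -1, 0, 1 };

        public static readonly float[] Sobel3x3Vertical =
            { -1, -2, -1,  0, 0, 0,  1, 2, 1 };

        public static Texture2D Apply(Texture2D src, float[] kernel)
        {
            int w = src.width, h = src.height;
            var dst = new Texture2D(w, h, src.format, false);

            for (int x = 0; x < w; x++)
                for (int y = 0; y < h; y++)
                {
                    float sum = 0.0f;
                    for (int ky = -1; ky <= 1; ky++)
                        for (int kx = -1; kx <= 1; kx++)
                        {
                            int sx = Mathf.Clamp(x + kx, 0, w - 1);
                            int sy = Mathf.Clamp(y + ky, 0, h - 1);
                            sum += src.GetPixel(sx, sy).r
                                 * kernel[(ky + 1) * 3 + (kx + 1)];
                        }

                    // The gradient may be negative; the caller squares the
                    // horizontal and vertical responses, so the sign is lost
                    // there anyway.
                    dst.SetPixel(x, y, new Color(sum, sum, sum));
                }

            dst.Apply();
            return dst;
        }
    }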
        /// <summary>
        /// Wiener filtering
        /// </summary>
        /// <param name="sourceImage">source image</param>
        /// <param name="bluredImage">distorted (blurred) image</param>
        /// <param name="filter">PSF</param>
        /// <param name="noise">noise</param>
        /// <returns>the restored image</returns>
        public static Image Filtering(Image sourceImage, Image bluredImage, ConvolutionFilter filter, byte[,] noise)
        {
            int height = sourceImage.Height;
            int width = sourceImage.Width;
            Complex[,] otf = OpticalTransferFunction.Psf2otf(filter);
            for (int u = 0; u < otf.GetLength(0); u++)
                for (int v = 0; v < otf.GetLength(1); v++)
                {
                    otf[u, v] = 1f / otf[u, v];
                }
            int filterSize = filter.filterMatrix.GetLength(0);       // PSF size
            int filterHalfSize = (filterSize - 1) / 2 + 1;                // PSF center
            ConvolutionFilter cf = OpticalTransferFunction.Otf2psf(otf);

            Image expandedBluredImage = bluredImage.Expand(filterHalfSize);
            Image expandedSourceImage = sourceImage.Expand(filterHalfSize);
            int expHeight = expandedSourceImage.Height;
            int expWidth = expandedSourceImage.Width;
            double[] expImage = Converter.ToDoubleArray(expandedBluredImage);
            double[] expSourceImage = Converter.ToDoubleArray(expandedSourceImage);
            double[,] bluredRed = new double[expHeight, expWidth];
            double[,] bluredGreen = new double[expHeight, expWidth];
            double[,] bluredBlue = new double[expHeight, expWidth];
            double[,] sourceRed = new double[expHeight, expWidth];
            double[,] sourceGreen = new double[expHeight, expWidth];
            double[,] sourceBlue = new double[expHeight, expWidth];

            int index = 0;
            for (int i = 0; i < expHeight; i++)
                for (int j = 0; j < expWidth; j++)
                {
                    bluredBlue[i, j] = expImage[index];
                    bluredGreen[i, j] = expImage[index + 1];
                    bluredRed[i, j] = expImage[index + 2];
                    sourceBlue[i, j] = expSourceImage[index];
                    sourceGreen[i, j] = expSourceImage[index + 1];
                    sourceRed[i, j] = expSourceImage[index + 2];
                    index += 4;
                }

            // Transform to the frequency domain
            // distorted image
            Complex[,] bluredRedFourier = Fourier.FastTransform(Converter.ToComplexMatrix(bluredRed));
            Complex[,] bluredGreenFourier = Fourier.FastTransform(Converter.ToComplexMatrix(bluredGreen));
            Complex[,] bluredBlueFourier = Fourier.FastTransform(Converter.ToComplexMatrix(bluredBlue));
            // source image
            Complex[,] redSpectrum = Fourier.FastTransform(Converter.ToComplexMatrix(sourceRed));
            Complex[,] greenSpectrum = Fourier.FastTransform(Converter.ToComplexMatrix(sourceGreen));
            Complex[,] blueSpectrum = Fourier.FastTransform(Converter.ToComplexMatrix(sourceBlue));
            // noise and PSF
            Complex[,] noiseSpectrum = Fourier.FastTransform(Converter.ToComplexMatrix(noise));
            int newSize = bluredBlueFourier.GetLength(0);
            double[,] kernel_1 = cf.ExpendedByZero(newSize);
            Complex[,] kernel_1Fourier = Fourier.FastTransform(Converter.ToComplexMatrix(kernel_1));
            double[,] kernel = filter.ExpendedByZero(newSize);
            Complex[,] kernelFourier = Fourier.FastTransform(Converter.ToComplexMatrix(kernel));
            Complex[,] kernelFourierPow = new Complex[newSize, newSize]; // TODO: extract into a helper function
            for (int u = 0; u < newSize; u++)
                for (int v = 0; v < newSize; v++)
                {
                    kernelFourierPow[u, v] = OpticalTransferFunction.ModPow(kernelFourier[u, v]);

                    bluredRedFourier[u, v] *= ((kernel_1Fourier[u, v]) * (kernelFourierPow[u, v] / (kernelFourierPow[u, v] + noiseSpectrum[u, v] / redSpectrum[u, v])));
                    bluredGreenFourier[u, v] *= ((kernel_1Fourier[u, v]) * (kernelFourierPow[u, v] / (kernelFourierPow[u, v] + noiseSpectrum[u, v] / greenSpectrum[u, v])));
                    bluredBlueFourier[u, v] *= ((kernel_1Fourier[u, v]) * (kernelFourierPow[u, v] / (kernelFourierPow[u, v] + noiseSpectrum[u, v] / blueSpectrum[u, v])));
                }
            Complex[,] newRed = Fourier.IFastTransform(bluredRedFourier, expHeight, expWidth);
            Complex[,] newGreen = Fourier.IFastTransform(bluredGreenFourier, expHeight, expWidth);
            Complex[,] newBlue = Fourier.IFastTransform(bluredBlueFourier, expHeight, expWidth);

            Complex[,] resRed = new Complex[height, width];
            Complex[,] resGreen = new Complex[height, width];
            Complex[,] resBlue = new Complex[height, width];

            index = 0;
            for (int i = 0; i < height; i++)
                for (int j = 0; j < width; j++)
                {
                    resRed[i, j] = Math.Round(newRed[i + filterHalfSize + 1, j + filterHalfSize + 1].Real);
                    resGreen[i, j] = Math.Round(newGreen[i + filterHalfSize + 1, j + filterHalfSize + 1].Real);
                    resBlue[i, j] = Math.Round(newBlue[i + filterHalfSize + 1, j + filterHalfSize + 1].Real);
                }
            Image result = Converter.ToImage(resRed, resGreen, resBlue);

            return result;
        }
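For reference, the per-channel loop above evaluates the classic frequency-domain Wiener deconvolution, with H the OTF of the blur kernel (kernelFourier), |H|^2 its power spectrum (kernelFourierPow), G the spectrum of the blurred channel, and the noise-to-signal ratio S_n/S_f approximated by noiseSpectrum divided by the corresponding source-channel spectrum:

    \hat{F}(u,v) = \frac{1}{H(u,v)} \cdot \frac{|H(u,v)|^{2}}{|H(u,v)|^{2} + S_n(u,v)/S_f(u,v)} \, G(u,v)

The 1/H factor enters through kernel_1Fourier, the transform of the inverted-OTF PSF built at the top of the method; as S_n/S_f tends to zero the expression degenerates to naive inverse filtering.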
Exemple #27
0
 public ImageMap Convolve(ImageMap img)
 {
     return(ConvolutionFilter.Convolve(img, mask));
 }
    public override void OnInspectorGUI()
    {
        ConvolutionFilter filter = target as ConvolutionFilter;

        if (filter == null)
        {
            return;
        }

        float lastKernelFactor = filter.kernelFactor;

        base.OnInspectorGUI();

        if (lastKernelFactor != filter.kernelFactor)
        {
            filter.filterType = ConvFilterType.CustomFilter;
        }

        float gridItemWidth = 50f;
        int   rows          = 3;
        int   cols          = 3;

        float[] kernel = filter.GetCurrentKernelWeights();

        float[] kernelValues = new float[kernel.Length];
        for (int i = 0; i < kernelValues.Length; i++)
        {
            kernelValues[i] = kernel[i];
        }

        GUILayout.BeginVertical();
        for (int y = 0; y < rows; y++)
        {
            GUILayout.BeginHorizontal();
            for (int x = 0; x < cols; x++)
            {
                kernelValues[x + y * cols] = EditorGUILayout.FloatField(GUIContent.none,
                                                                        kernelValues[x + y * cols], GUILayout.Width(gridItemWidth));
                if (kernelValues[x + y * cols] != kernel[x + y * cols])
                {
                    filter.filterType = ConvFilterType.CustomFilter;
                }
            }
            GUILayout.EndHorizontal();
        }
        GUILayout.EndVertical();

        if (filter.filterType == ConvFilterType.CustomFilter)
        {
            filter.kernelweights = kernelValues;
        }
        else
        {
            filter.kernelFactor = 1f;
        }

        if (GUILayout.Button("Apply filter"))
        {
            filter.ApplyFilter();
        }
    }
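The inspector above edits a ConvolutionFilter component that is not listed. A minimal sketch of the shape it relies on; beyond the member names the editor actually touches (filterType, kernelFactor, kernelweights, GetCurrentKernelWeights(), ApplyFilter()), everything here is an assumption:

    // Hypothetical shape of the component the editor above targets; only
    // the members the inspector uses are sketched, presets are omitted.
    public enum ConvFilterType { Identity, Blur, Sharpen, CustomFilter }

    public class ConvolutionFilter : UnityEngine.MonoBehaviour
    {
        public ConvFilterType filterType = ConvFilterType.Identity;
        public float   kernelFactor  = 1f;             // divisor applied to the kernel
        public float[] kernelweights = new float[9];   // 3x3 custom weights, row-major

        // Returns the 3x3 kernel for the current preset, or the custom weights.
        public float[] GetCurrentKernelWeights()
        {
            return filterType == ConvFilterType.CustomFilter
                ? kernelweights
                : new float[] { 0, 0, 0, 0, 1, 0, 0, 0, 0 }; // identity placeholder
        }

        public void ApplyFilter()
        {
            // Convolve the target texture with the current kernel here.
        }
    }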
Exemple #29
0
        private static Image EntropyCropFull(Image image, byte threshold)
        {
            Bitmap newImage = null;
            Bitmap grey     = null;

            try
            {
                // Detect the edges then strip out middle shades.
                grey = new ConvolutionFilter(new SobelEdgeFilter(), true).Process2DFilter(image);
                grey = new BinaryThreshold(threshold).ProcessFilter(grey);

                // Search for the first white pixels
                Rectangle rectangle = GetFilteredBoundingRectangle(grey, 0, RgbaComponent.R);
                grey.Dispose();

                newImage = new Bitmap(rectangle.Width, rectangle.Height, PixelFormat.Format32bppPArgb);
                newImage.SetResolution(image.HorizontalResolution, image.VerticalResolution);
                using (Graphics graphics = Graphics.FromImage(newImage))
                {
                    graphics.DrawImage(
                        image,
                        new Rectangle(0, 0, rectangle.Width, rectangle.Height),
                        rectangle.X,
                        rectangle.Y,
                        rectangle.Width,
                        rectangle.Height,
                        GraphicsUnit.Pixel);
                }

                // Reassign the image.
                image.Dispose();
                image = newImage;

                return(image);

                //if (factory.PreserveExifData && factory.ExifPropertyItems.Any())
                //{
                //    // Set the width EXIF data.
                //    factory.SetPropertyItem(ExifPropertyTag.ImageWidth, (ushort)image.Width);

                //    // Set the height EXIF data.
                //    factory.SetPropertyItem(ExifPropertyTag.ImageHeight, (ushort)image.Height);
                //}
            }
            catch (Exception ex)
            {
                if (grey != null)
                {
                    grey.Dispose();
                }

                if (newImage != null)
                {
                    newImage.Dispose();
                }

                throw new Exception("Error processing image", ex);
            }

            return(image);
        }
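GetFilteredBoundingRectangle is not listed; conceptually it scans the thresholded edge bitmap for the extents of the non-black pixels and returns the enclosing rectangle. A simplified sketch (the real method also takes a component selector and a threshold, and a LockBits scan would be far faster than GetPixel):

    // Simplified sketch: bounding box of all pixels whose red channel is
    // non-zero in a black/white thresholded bitmap.
    private static Rectangle GetBoundingRectangleSketch(Bitmap bitmap)
    {
        int left = bitmap.Width, right = 0, top = bitmap.Height, bottom = 0;

        for (int y = 0; y < bitmap.Height; y++)
            for (int x = 0; x < bitmap.Width; x++)
            {
                if (bitmap.GetPixel(x, y).R > 0)
                {
                    if (x < left)   left = x;
                    if (x > right)  right = x;
                    if (y < top)    top = y;
                    if (y > bottom) bottom = y;
                }
            }

        if (right < left || bottom < top)
            return new Rectangle(0, 0, bitmap.Width, bitmap.Height); // nothing found

        return Rectangle.FromLTRB(left, top, right + 1, bottom + 1);
    }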
Exemple #30
0
    /// <summary>
    /// Applies blur to the source image
    /// </summary>
    private void AddBlur(object sender = null, EventArgs e = null)
    {
        if (blurThread != null)
            blurThread.Abort();
        if (sourceImage == null)                                    // if there is no input image
        {
            LoadImage();                                            // load one
            if (sourceImage == null)                                // if it is still missing (the user cancelled)
                return;                                             // bail out
        }
        // Reset the subsequent pipeline stages
        ChangeImage(noisePictureBox, null);
        ChangeImage(recoveredPictureBox, null);
        ChangeText(noiseSizeText, "-");
        ChangeText(recoveredSizeText, "-");
        ChangeImage(noiseMaskPictureBox, null);
        ChangeImage(recoveryKernalPictureBox, null);

        if (sender == null)                                         // if this was not triggered by a button
        {
            blur = Filters.CopyFilter;                              // use the exact-copy (identity) filter
            blurPictureBox.Image = sourceImage;                     // and show the unchanged image immediately
        }
        else
        {
            switch (((Button)sender).Name)                          // choose the filter based on the button name
            {
                case "Gaussian3x3Button":
                    blur = Filters.Gaussian3x3BlurFilter;
                    break;
                case "Gaussian5x5Button":
                    blur = Filters.Gaussian5x5BlurFilter;
                    break;
            }
        }
        blurThread = new Thread(() =>
        {
            ChangeImage(blurPictureBox, IRIntegration.Properties.Resources.Loading);
            ChangeImage(blurKernalPictureBox, IRIntegration.Properties.Resources.Loading);
            if (noiseThread != null) noiseThread.Abort();
            if (reconstructionThread != null) reconstructionThread.Abort();
            // perform the convolution
            ChangeImage(blurPictureBox, blur.Convolution(new Bitmap(sourceImage), ConvolutionFilter.ConvolutionMode.expand));
            // display the dimensions and the distortion kernel
            ChangeText(bluredSizeText, blurPictureBox.Image.Height + " x " + blurPictureBox.Image.Width);
            ChangeImage(blurKernalPictureBox, Converter.ToImage(Mull(blur.normalizedFilterMatrix, 255)));
        });
        blurThread.Start();
    }
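The Gaussian presets chosen in the switch above are not listed. Filters.Gaussian3x3BlurFilter plausibly wraps the standard normalized 3x3 Gaussian kernel; the exact values are an assumption (binomial weights with divisor 16 and zero bias; the 5x5 variant normalizes by 256):

    // Assumed kernel behind Filters.Gaussian3x3BlurFilter: binomial
    // weights normalized so the filter preserves overall brightness.
    double[,] gaussian3x3 =
    {
        { 1 / 16.0, 2 / 16.0, 1 / 16.0 },
        { 2 / 16.0, 4 / 16.0, 2 / 16.0 },
        { 1 / 16.0, 2 / 16.0, 1 / 16.0 },
    };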
Exemple #31
0
 public XElement Visit(ConvolutionFilter filter, object arg)
 {
     return(XConvolutionFilter.ToXml(filter));
 }