Code Example #1
//JAVA TO C# CONVERTER WARNING: Method 'throws' clauses are not available in .NET:
//ORIGINAL LINE: public static void main(String[] args) throws java.io.IOException
        public static void Main(string[] args)
        {
            // User input parameters
            //*******************************************************************************************************************************************
            string imagePath   = "C:/Users/Mihailo/Desktop/OCR/tekst.png";    // path to the image with letters (document) for recognition
            string textPath    = "C:/Users/Mihailo/Desktop/OCR/tekst.txt";    // path to the .txt file where the recognized text will be stored
            string networkPath = "C:/Users/Mihailo/Desktop/OCR/network.nnet"; // location of the trained network
            int    fontSize    = 12;                                          // font size, predicted by the height of the letters, minimum font size is 12 pt
            int    scanQuality = 300;                                         // scan quality, minimum quality is 300 dpi
            //*******************************************************************************************************************************************

            BufferedImage    image = ImageIO.read(new File(imagePath));
            ImageFilterChain chain = new ImageFilterChain();

            chain.addFilter(new GrayscaleFilter());
            chain.addFilter(new OtsuBinarizeFilter());
            BufferedImage binarizedImage = chain.processImage(image);

            // Information about letters and text
            Letter letterInfo = new Letter(scanQuality, binarizedImage);
            //        letterInfo.recognizeDots(); // call this method only if you want to recognize dots and other little characters (TODO)
            Text textInfo = new Text(binarizedImage, letterInfo);

            OCRTextRecognition recognition = new OCRTextRecognition(letterInfo, textInfo);

            recognition.NetworkPath = networkPath;

            recognition.recognize();

            // if you want to save the recognized text
            //        recognition.setRecognizedTextPath(textPath);
            //        recognition.saveText();

            Console.WriteLine(recognition.RecognizedText);
        }
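
The commented-out calls above hint at how the result could be written to disk instead of only printed. A minimal sketch of that follow-up, assuming the Java-to-C# converter exposes the original setRecognizedTextPath() setter as a RecognizedTextPath property (a hypothetical name, mirroring the NetworkPath and RecognizedText properties used above):

            // Hypothetical continuation: persist the recognized text to the .txt file.
            // RecognizedTextPath is an assumed property name; the converted class may differ.
            recognition.RecognizedTextPath = textPath; // reuse the textPath declared above
            recognition.saveText();                    // shown commented out in the original example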
Code Example #2
        public void initialize()
        {
            ContextualMemoryEdgeDetectionAlgorithm algorithm = new ContextualMemoryEdgeDetectionAlgorithm(colorChannels, longestContextLength, tableSizeBits, numberOfRays);
            //ContextualMemoryEdgeDetectionAlgorithmNoCollisions algorithm = new ContextualMemoryEdgeDetectionAlgorithmNoCollisions(colorChannels, longestContextLength, tableSizeBits, numberOfRays);
            //ContextualMemoryEdgeDetectionAlgorithmNoCollisionsBiasReplace algorithm =
            //    new ContextualMemoryEdgeDetectionAlgorithmNoCollisionsBiasReplace(colorChannels, longestContextLength, tableSizeBits, numberOfRays);

            ImageFilterChain filterChain = new ImageFilterChain();

            filterChain.addFilter(new CannyAppenderFilter());
            //filterChain.addFilter(new SobelAppenderFilter());
            filterChain.addFilter(new KirschAppenderFilter(1.4f, true, 32, 0));
            //filterChain.addFilter(new GaussFilter(5, 1.4f, new HashSet<ColorChannelEnum> { ColorChannelEnum.Gray }));
            filterChain.addFilter(new GaussFilterExcludeComputed(5, 1.4f, colorChannels));
            //filterChain.addFilter(new GaussFilter(5, 1.4f, colorChannels));
            algorithm.inputImageFilterChain = filterChain;

            this.algorithm = algorithm;
        }
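
This initialize() method reads several members (colorChannels, longestContextLength, tableSizeBits, numberOfRays, algorithm) that are not part of the snippet. A minimal sketch of plausible declarations for the enclosing class, borrowing the values that Code Example #3 passes as defaults; the real names, defaults, and base type may differ:

        // Hypothetical fields for the enclosing class (not shown in the original snippet).
        // Values mirror the defaults of trainAndSaveModel() in Code Example #3.
        private ISet<ColorChannelEnum> colorChannels = new HashSet<ColorChannelEnum> {
            ColorChannelEnum.Red, ColorChannelEnum.Green, ColorChannelEnum.Blue,
            ColorChannelEnum.Canny, ColorChannelEnum.Kirsch
        };
        private int longestContextLength = 10; // corresponds to rayLength in Code Example #3
        private int tableSizeBits        = 24; // corresponds to memoryBits in Code Example #3
        private int numberOfRays         = 16; // rays sampled around each pixel
        private EdgeDetectionAlgorithm algorithm; // assumed base type; assigned at the end of initialize()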
Code Example #3
        public static void trainAndSaveModel(string outputModelFilename, string relativeOutputPath,
                                             int numberOfRays = 16, int rayLength = 10, int memoryBits = 24, int numberOfPasses = 1, bool useRotations = false)
        {
            ISet <ColorChannelEnum> colorChannels = new HashSet <ColorChannelEnum> {
                ColorChannelEnum.Red, ColorChannelEnum.Green, ColorChannelEnum.Blue
            };
            ISet <ColorChannelEnum> colorChannelsAndComputed = new HashSet <ColorChannelEnum> {
                ColorChannelEnum.Red, ColorChannelEnum.Green, ColorChannelEnum.Blue, ColorChannelEnum.Canny, ColorChannelEnum.Kirsch
            };

            ImageFilterChain filterChain = new ImageFilterChain();

            filterChain.addFilter(new CannyAppenderFilter());
            filterChain.addFilter(new KirschAppenderFilter());
            filterChain.addFilter(new GaussFilterExcludeComputed(5, 1.4f, colorChannels));
            //filterChain.addFilter(new MedianFilter(1, colorChannels));

            ContextualMemoryEdgeDetectionAlgorithm algorithm = new ContextualMemoryEdgeDetectionAlgorithm(colorChannelsAndComputed, rayLength, memoryBits, numberOfRays);

            //ContextualMemoryEdgeDetectionAlgorithmNoCollisions algorithm = new ContextualMemoryEdgeDetectionAlgorithmNoCollisions(colorChannelsAndComputed, rayLength, memoryBits, numberOfRays);
            //ContextualMemoryEdgeDetectionAlgorithmNoCollisionsBiasReplace algorithm =
            //    new ContextualMemoryEdgeDetectionAlgorithmNoCollisionsBiasReplace(colorChannelsAndComputed, rayLength, memoryBits, numberOfRays);
            algorithm.inputImageFilterChain = filterChain;

            BerkeleyEdgeDetectionBenchmark benchmark = new BerkeleyEdgeDetectionBenchmark(localBenchmarkPath, Path.Combine(localBenchmarkPath, relativeOutputPath), useRotations, false);
            EdgeDetectionProcessor         processor = new EdgeDetectionProcessor(benchmark, numberOfPasses, true);

            Console.WriteLine("Training started.");
            processor.train(algorithm);
            Console.WriteLine("Training finished.");

            EdgeDetectionAlgorithmUtil.saveToCompressedFile(algorithm, outputModelFilename);

            EdgeDetectionAlgorithm algorithmToTest = EdgeDetectionAlgorithmUtil.loadAlgorithmFromCompressedFile(outputModelFilename);

            Console.WriteLine("Testing started.");
            processor.test(algorithmToTest);
            Console.WriteLine("Testing finished.");

            Console.WriteLine("Validation started.");
            processor.validate();
            Console.WriteLine("Validation finished.");
        }
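
Since every parameter except the two paths has a default, a single call is enough to run the whole train/test/validate cycle. An illustrative invocation (both the model file name and the relative output folder are made up for this sketch, and localBenchmarkPath must already point at the Berkeley benchmark data):

            trainAndSaveModel("contextual-memory-edges.model", "results/run1");
            // or, overriding the defaults: 32 rays, ray length 12, 2 passes, with rotations
            trainAndSaveModel("contextual-memory-edges-32.model", "results/run2",
                              numberOfRays: 32, rayLength: 12, numberOfPasses: 2, useRotations: true);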
Code Example #4
//JAVA TO C# CONVERTER WARNING: Method 'throws' clauses are not available in .NET:
//ORIGINAL LINE: public static void main(String[] args) throws java.io.IOException
        public static void Main(string[] args)
        {
            // User input parameters
            //*******************************************************************************************************************************
            string imagePath   = "C:/Users/Mihailo/Desktop/OCR/slova.png";    // path to the image with letters
            string folderPath  = "C:/Users/Mihailo/Desktop/OCR/ImagesDir/";   // location of the folder for storing segmented letters
            string textPath    = "C:/Users/Mihailo/Desktop/OCR/slova.txt";    // path to the .txt file with the text on the image
            string networkPath = "C:/Users/Mihailo/Desktop/OCR/network.nnet"; // location where the trained network will be stored
            int    fontSize    = 12;                                          // font size, predicted by the height of the letters, minimum font size is 12 pt
            int    scanQuality = 300;                                         // scan quality, minimum quality is 300 dpi
            //*******************************************************************************************************************************

            BufferedImage    image = ImageIO.read(new File(imagePath));
            ImageFilterChain chain = new ImageFilterChain();

            chain.addFilter(new GrayscaleFilter());
            chain.addFilter(new OtsuBinarizeFilter());
            BufferedImage binarizedImage = chain.processImage(image);



            // Information about letters and text
            Letter letterInfo = new Letter(scanQuality, binarizedImage);
            //        letterInfo.recognizeDots(); // call this method only if you want to recognize dots and other little characters (TODO)

            Text textInfo = new Text(binarizedImage, letterInfo);

            OCRTraining ocrTraining = new OCRTraining(letterInfo, textInfo);

            ocrTraining.FolderPath       = folderPath;
            ocrTraining.TrainingTextPath = textPath;
            ocrTraining.prepareTrainingSet();



            List <string> characterLabels = ocrTraining.CharacterLabels;

            IDictionary <string, FractionRgbData> map = ImageRecognitionHelper.getFractionRgbDataForDirectory(new File(folderPath), new Dimension(20, 20));
            DataSet dataSet = ImageRecognitionHelper.createBlackAndWhiteTrainingSet(characterLabels, map);


            dataSet.FilePath = "C:/Users/Mihailo/Desktop/OCR/DataSet1.tset";
            dataSet.save();


            List <int?> hiddenLayers = new List <int?>();

            hiddenLayers.Add(12);

            NeuralNetwork   nnet = ImageRecognitionHelper.createNewNeuralNetwork("someNetworkName", new Dimension(20, 20), ColorMode.BLACK_AND_WHITE, characterLabels, hiddenLayers, TransferFunctionType.SIGMOID);
            BackPropagation bp   = (BackPropagation)nnet.LearningRule;

            bp.LearningRate = 0.3;
            bp.MaxError     = 0.1;


            //        MultiLayerPerceptron mlp = new MultiLayerPerceptron(12,13);
            //        mlp.setOutputNeurons(null);

            Console.WriteLine("Start learning...");
            nnet.learn(dataSet);
            Console.WriteLine("NNet learned");

            nnet.save(networkPath);
        }
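
Once nnet.save(networkPath) has run, the .nnet file is the same artifact that Code Example #1 loads for recognition. A minimal sketch of that hand-off, reusing only calls already shown in these examples (in practice letterInfo and textInfo would be rebuilt from the document you actually want to recognize, as in Code Example #1):

            OCRTextRecognition recognition = new OCRTextRecognition(letterInfo, textInfo);
            recognition.NetworkPath = networkPath; // the network just written by nnet.save(networkPath)
            recognition.recognize();
            Console.WriteLine(recognition.RecognizedText);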