public static void speakerAdaptiveDecoding(ArrayList speakers, URL url)
        {
            Configuration configuration = new Configuration();

            configuration.setAcousticModelPath("resource:/edu/cmu/sphinx/models/en-us/en-us");
            configuration.setDictionaryPath("resource:/edu/cmu/sphinx/models/en-us/cmudict-en-us.dict");
            configuration.setLanguageModelPath("resource:/edu/cmu/sphinx/models/en-us/en-us.lm.bin");
            StreamSpeechRecognizer streamSpeechRecognizer = new StreamSpeechRecognizer(configuration);
            Iterator iterator = speakers.iterator();

            while (iterator.hasNext())
            {
                SpeakerCluster speakerCluster   = (SpeakerCluster)iterator.next();
                Stats          stats            = streamSpeechRecognizer.createStats(1);
                ArrayList      speakerIntervals = speakerCluster.getSpeakerIntervals();
                Iterator       iterator2        = speakerIntervals.iterator();
                while (iterator2.hasNext())
                {
                    Segment   segment   = (Segment)iterator2.next();
                    long      start     = (long)segment.getStartTime();
                    long      end       = (long)(segment.getStartTime() + segment.getLength());
                    TimeFrame timeFrame = new TimeFrame(start, end);
                    streamSpeechRecognizer.startRecognition(url.openStream(), timeFrame);
                    SpeechResult result;
                    while ((result = streamSpeechRecognizer.getResult()) != null)
                    {
                        stats.collect(result);
                    }
                    streamSpeechRecognizer.stopRecognition();
                }
                Transform transform = stats.createTransform();
                streamSpeechRecognizer.setTransform(transform);
                Iterator iterator3 = speakerIntervals.iterator();
                while (iterator3.hasNext())
                {
                    Segment   segment2  = (Segment)iterator3.next();
                    long      start2    = (long)segment2.getStartTime();
                    long      end2      = (long)(segment2.getStartTime() + segment2.getLength());
                    TimeFrame timeFrame = new TimeFrame(start2, end2);
                    streamSpeechRecognizer.startRecognition(url.openStream(), timeFrame);
                    SpeechResult result;
                    while ((result = streamSpeechRecognizer.getResult()) != null)
                    {
                        [email protected]("Hypothesis: %s\n", new object[]
                        {
                            result.getHypothesis()
                        });
                    }
                    streamSpeechRecognizer.stopRecognition();
                }
            }
        }
        public static void main(string[] args)
        {
            Configuration configuration = new Configuration();

            configuration.setAcousticModelPath("resource:/edu/cmu/sphinx/models/en-us/en-us");
            configuration.setDictionaryPath("resource:/edu/cmu/sphinx/models/en-us/cmudict-en-us.dict");
            configuration.setLanguageModelPath("resource:/edu/cmu/sphinx/models/en-us/en-us.lm.bin");
            StreamSpeechRecognizer streamSpeechRecognizer = new StreamSpeechRecognizer(configuration);
            FileInputStream        fileInputStream        = new FileInputStream(new File(args[0]));

            fileInputStream.skip((long)((ulong)44));
            streamSpeechRecognizer.startRecognition(fileInputStream);
            SpeechResult result;

            while ((result = streamSpeechRecognizer.getResult()) != null)
            {
                [email protected](result.getHypothesis());
            }
            streamSpeechRecognizer.stopRecognition();
        }
        // Example #3
        public static void main(string[] args)
        {
            [email protected]("Loading models...");
            Configuration configuration = new Configuration();

            configuration.setAcousticModelPath("resource:/edu/cmu/sphinx/models/en-us/en-us");
            configuration.setDictionaryPath("resource:/edu/cmu/sphinx/models/en-us/cmudict-en-us.dict");
            configuration.setLanguageModelPath("resource:/edu/cmu/sphinx/models/en-us/en-us.lm.bin");
            StreamSpeechRecognizer streamSpeechRecognizer = new StreamSpeechRecognizer(configuration);
            InputStream            resourceAsStream       = ClassLiteral <TranscriberDemo> .Value.getResourceAsStream("/edu/cmu/sphinx/demo/aligner/10001-90210-01803.wav");

            resourceAsStream.skip((long)((ulong)44));
            streamSpeechRecognizer.startRecognition(resourceAsStream);
            SpeechResult result;

            while ((result = streamSpeechRecognizer.getResult()) != null)
            {
                [email protected]("Hypothesis: %s\n", new object[]
                {
                    result.getHypothesis()
                });
                [email protected]("List of recognized words and their times:");
                Iterator iterator = result.getWords().iterator();
                while (iterator.hasNext())
                {
                    WordResult wordResult = (WordResult)iterator.next();
                    [email protected](wordResult);
                }
                [email protected]("Best 3 hypothesis:");
                iterator = result.getNbest(3).iterator();
                while (iterator.hasNext())
                {
                    string text = (string)iterator.next();
                    [email protected](text);
                }
            }
            streamSpeechRecognizer.stopRecognition();
            resourceAsStream = ClassLiteral <TranscriberDemo> .Value.getResourceAsStream("/edu/cmu/sphinx/demo/aligner/10001-90210-01803.wav");

            resourceAsStream.skip((long)((ulong)44));
            Stats stats = streamSpeechRecognizer.createStats(1);

            streamSpeechRecognizer.startRecognition(resourceAsStream);
            while ((result = streamSpeechRecognizer.getResult()) != null)
            {
                stats.collect(result);
            }
            streamSpeechRecognizer.stopRecognition();
            Transform transform = stats.createTransform();

            streamSpeechRecognizer.setTransform(transform);
            resourceAsStream = ClassLiteral <TranscriberDemo> .Value.getResourceAsStream("/edu/cmu/sphinx/demo/aligner/10001-90210-01803.wav");

            resourceAsStream.skip((long)((ulong)44));
            streamSpeechRecognizer.startRecognition(resourceAsStream);
            while ((result = streamSpeechRecognizer.getResult()) != null)
            {
                [email protected]("Hypothesis: %s\n", new object[]
                {
                    result.getHypothesis()
                });
            }
            streamSpeechRecognizer.stopRecognition();
        }