Example #1
0
        static void Main()
        {
            // Path to the folder with models extracted from `stanford-corenlp-3.9.1-models.jar`
            var jarRoot = @"..\..\..\..\data\paket-files\nlp.stanford.edu\stanford-corenlp-full-2018-10-05\models";

            // Text for processing
            var text = "Kosgi Santosh sent an email to Stanford University. He didn't get a reply.";

            // Annotation pipeline configuration
            var props = new Properties();

            props.setProperty("annotators", "tokenize, ssplit, pos, lemma, parse, ner,dcoref");
            props.setProperty("ner.useSUTime", "0");

            // Change the current directory so StanfordCoreNLP can find all the model
            // files automatically; restore it in a finally block so an exception while
            // loading models cannot leave the process in the models directory.
            var curDir = Environment.CurrentDirectory;
            StanfordCoreNLP pipeline;

            Directory.SetCurrentDirectory(jarRoot);
            try
            {
                pipeline = new StanfordCoreNLP(props);
            }
            finally
            {
                Directory.SetCurrentDirectory(curDir);
            }

            // Annotate the sample text.
            var annotation = new Annotation(text);

            pipeline.annotate(annotation);

            // Result - Pretty Print. The `using` block disposes (closes) the Java
            // stream, so no explicit close() is needed.
            using (var stream = new ByteArrayOutputStream())
            {
                pipeline.prettyPrint(annotation, new PrintWriter(stream));
                Console.WriteLine(stream.toString());
            }
        }
        //-------------------------------------------------------------------------------------- Stanford Core NLP -----------------------------------------
        //-- Better for Entity recognition

        public static void buildPipeline(string text)
        {
            // Builds a full CoreNLP pipeline (POS tagging, lemmatization, NER,
            // parsing, coreference), annotates `text`, and pretty-prints the result
            // to the debug output.
            // Ref: https://interviewbubble.com/getting-started-with-stanford-corenlp-a-stanford-corenlp-tutorial/
            //
            // NOTE(review): in the original, `jarRoot` was declared but never used.
            // The sibling demos in this project switch the current directory to it so
            // CoreNLP can locate its model files; the same pattern is applied here.
            // Confirm the models are not loaded some other way before relying on this.
            var jarRoot = @"..\..\..\..\data\paket-files\nlp.stanford.edu\stanford-corenlp-full-2016-10-31\models";

            Properties props = new Properties();
            props.setProperty("annotators", "tokenize, ssplit, pos, lemma, ner, parse, dcoref");

            // Restore the working directory even if model loading throws.
            var curDir = Environment.CurrentDirectory;
            StanfordCoreNLP pipeline;

            Directory.SetCurrentDirectory(jarRoot);
            try
            {
                pipeline = new StanfordCoreNLP(props);
            }
            finally
            {
                Directory.SetCurrentDirectory(curDir);
            }

            // Create an empty Annotation just with the given text and run all
            // annotators on it.
            Annotation document = new Annotation(text);
            pipeline.annotate(document);

            // Result - Pretty Print. `using` disposes (closes) the stream, so the
            // explicit close() of the original was redundant.
            using (var stream = new ByteArrayOutputStream())
            {
                pipeline.prettyPrint(document, new PrintWriter(stream));
                Debug.WriteLine(stream.toString());
            }
        }
Example #3
0
        public String testFreemarker()
        {
            // Renders the FreeMarker template `template/c.ftl` with this instance
            // exposed to the template as `codeGen`, and returns the generated text.
            // Returns the empty string when the template cannot be loaded or
            // processed (best-effort contract preserved from the original).
            //
            // NOTE: the original also assigned Assembly.GetExecutingAssembly() to an
            // unused local; that dead code has been removed.
            try
            {
                Configuration cfg = new Configuration();
                cfg.setDirectoryForTemplateLoading(new File("template"));
                cfg.setObjectWrapper(new DefaultObjectWrapper());

                Template temp = cfg.getTemplate("c.ftl");

                // Template data model: the template pulls values off this object.
                Map root = new HashMap();
                root.put("codeGen", this);

                ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
                Writer output = new OutputStreamWriter(outputStream);

                temp.process(root, output);
                output.flush();
                return(outputStream.toString());
            }
            catch (IOException exception)
            {
                // Template directory/file missing or unreadable — log instead of
                // silently swallowing, but keep the empty-string fallback.
                Debug.WriteLine(exception);
            }
            catch (TemplateException exception)
            {
                // Template failed to evaluate — same best-effort fallback.
                Debug.WriteLine(exception);
            }
            return("");
        }
Example #4
0
        public void StanfordCoreNlpDemoThatChangeCurrentDirectory()
        {
            // Demonstrates the full annotator chain on a fixed sentence, locating the
            // model files by temporarily switching the working directory to
            // Config.JarRoot.
            const string Text = "Kosgi Santosh sent an email to Stanford University. He didn't get a reply.";

            // Annotation pipeline configuration
            var props = new Properties();

            props.setProperty("annotators", "tokenize, ssplit, pos, lemma, ner, parse, dcoref");
            props.setProperty("sutime.binders", "0");

            // Change the current directory so StanfordCoreNLP can find the model
            // files; restore it in a finally block so a failure while loading models
            // cannot leave the process in Config.JarRoot.
            var curDir = Environment.CurrentDirectory;
            edu.stanford.nlp.pipeline.StanfordCoreNLP pipeline;

            Directory.SetCurrentDirectory(Config.JarRoot);
            try
            {
                pipeline = new edu.stanford.nlp.pipeline.StanfordCoreNLP(props);
            }
            finally
            {
                Directory.SetCurrentDirectory(curDir);
            }

            // Annotate the sample text.
            var annotation = new Annotation(Text);

            pipeline.annotate(annotation);

            // Result - Pretty Print (`using` disposes/closes the stream).
            using (var stream = new ByteArrayOutputStream())
            {
                pipeline.prettyPrint(annotation, new PrintWriter(stream));
                Console.WriteLine(stream.toString());
            }

            this.CustomAnnotationPrint(annotation);
        }
Example #5
0
        public ParsedStatementFactory.ParseResult ParseStatement(string input)
        {
            // Tokenizes the raw input, parses it with the loaded lexicalized parser,
            // renders the parse as XML, and hands the XML to ParsedStatementFactory.
            var sentReader = new StringReader(input);
            var rawWords   = _tokenizerFactory.getTokenizer(sentReader).tokenize();
            var parse      = _lp.apply(rawWords);

            // NOTE(review): gs/tdl are computed but never used below — presumably
            // leftovers from previously commented-out dependency printing. Kept in
            // case newGrammaticalStructure has side effects; confirm before removing.
            var gs  = _structureFactory.newGrammaticalStructure(parse);
            var tdl = gs.typedDependenciesCCprocessed();

            var xmlTreePrint = new TreePrint("xmlTree, dependencies", "xml, collapsedDependencies", _tlp);

            string xmlOutput;

            // Dispose the Java stream deterministically instead of leaking it.
            using (var stream = new ByteArrayOutputStream())
            {
                xmlTreePrint.printTree(parse, new PrintWriter(stream));

                // TreePrint does not emit the closing sentence tag; append it so the
                // downstream XML consumer sees a well-formed document.
                xmlOutput = stream.toString() + "</s>";
            }

            return ParsedStatementFactory.CreateParsedStatement(xmlOutput);
        }
Example #6
0
 public String Read(File file) {
    // Reads the entire file into memory and decodes it as UTF-8 text.
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    InputStream source = new FileInputStream(file);
    try {
       byte[] chunk = new byte[1024];
       int count;
       // Copy in 1 KiB chunks until end-of-stream (-1).
       // NOTE(review): `source.Read` — the other java.io calls here use lowercase
       // Java names (`write`, `toString`); confirm this member name compiles against
       // the binding in use.
       while((count = source.Read(chunk)) != -1) {
          buffer.write(chunk, 0, count);
       }
       return buffer.toString("UTF-8");
    } finally {
       // Always release the file handle, even when a read throws (the original
       // leaked the stream on any exception).
       source.close();
    }
 }
Example #7
0
        public NERActor()
        {
            // Actor wiring: registers two message handlers.
            //  - string: a JSON-serialized SharedMessages.NERRequest; classifies each
            //    line with the shared classifier and replies to Sender with
            //    "ner:" + JSON-serialized NERResponse.
            //  - int: annotates the number's text form with the shared pipeline and
            //    prints NER tags, sentences, and a pretty-print to the console.
            Receive <string>(r =>
            {
                var req  = JsonConvert.DeserializeObject <SharedMessages.NERRequest>(r);
                var resp = new SharedMessages.NERResponse();
                foreach (var l in req.linesToProcess)
                {
                    // Wrap the classifier's inline-XML output in a single root element
                    // so it parses as a document; raw '&' would break XDocument.Parse,
                    // so it is masked as "(amp)" before parsing.
                    var res        = string.Format("<Results>{0}</Results>\n", Program.classifier.classifyWithInlineXML(l)).Replace("&", "(amp)");
                    XDocument xdoc = XDocument.Parse(res);
                    // Keep only the text content of each tagged entity element.
                    resp.results.Add(xdoc.Root.Elements().Select(z => z.Value).ToList());
                }
                // Echo the request's routing fields so the caller can correlate the reply.
                resp.section = req.section;
                resp.id      = req.id;
                resp.feed    = req.feed;

                Sender.Tell("ner:" + JsonConvert.SerializeObject(resp));
            });

            Receive <int>(r =>
            {
                var annotation = new Annotation(r.ToString());

                Program.pipeline.annotate(annotation);

                // Annotation values are keyed by Java class objects. NOTE(review):
                // the `as ArrayList` cast means a non-ArrayList value silently skips
                // the loop rather than failing — confirm that is intended.
                var NERs = annotation.get(typeof(edu.stanford.nlp.ling.CoreAnnotations.NamedEntityTagAnnotation));
                if (NERs != null)
                {
                    foreach (Annotation ner in NERs as ArrayList)
                    {
                        Console.WriteLine("NER: " + ner);
                    }
                }

                // these are all the sentences in this document
                // a CoreMap is essentially a Map that uses class objects as keys and has values with custom types
                var sentences = annotation.get(typeof(edu.stanford.nlp.ling.CoreAnnotations.SentencesAnnotation));
                if (sentences != null)
                {
                    foreach (Annotation sentence in sentences as ArrayList)
                    {
                        Console.WriteLine("Sent: " + sentence);
                    }
                }

                // Result - Pretty Print
                using (var stream = new ByteArrayOutputStream())
                {
                    var pw = new PrintWriter(stream);
                    Program.pipeline.prettyPrint(annotation, pw);
                    Console.WriteLine(stream.toString());
                    stream.close();
                }
            });
        }
Example #8
0
        public void StanfordCoreNlpDemoManualConfiguration()
        {
            // Same demo as the directory-changing variant, but every model file is
            // wired up explicitly via Config.GetModel instead of relying on the
            // current working directory.
            Console.WriteLine(Environment.CurrentDirectory);
            const string Text = "Kosgi Santosh sent an email to Stanford University. He didn't get a reply.";

            // Annotation pipeline configuration.
            var props = new Properties();

            props.setProperty("annotators", "tokenize, ssplit, pos, lemma, ner, parse, dcoref");

            // Property key / model-relative-path pairs, each resolved through
            // Config.GetModel before being applied.
            var modelPaths = new[,]
            {
                { "pos.model", @"pos-tagger\english-bidirectional\english-bidirectional-distsim.tagger" },
                { "ner.model", @"ner\english.all.3class.distsim.crf.ser.gz" },
                { "parse.model", @"lexparser\englishPCFG.ser.gz" },
                { "dcoref.demonym", @"dcoref\demonyms.txt" },
                { "dcoref.states", @"dcoref\state-abbreviations.txt" },
                { "dcoref.animate", @"dcoref\animate.unigrams.txt" },
                { "dcoref.inanimate", @"dcoref\inanimate.unigrams.txt" },
                { "dcoref.male", @"dcoref\male.unigrams.txt" },
                { "dcoref.neutral", @"dcoref\neutral.unigrams.txt" },
                { "dcoref.female", @"dcoref\female.unigrams.txt" },
                { "dcoref.plural", @"dcoref\plural.unigrams.txt" },
                { "dcoref.singular", @"dcoref\singular.unigrams.txt" },
                { "dcoref.countries", @"dcoref\countries" },
                { "dcoref.extra.gender", @"dcoref\namegender.combine.txt" },
                { "dcoref.states.provinces", @"dcoref\statesandprovinces" },
                { "dcoref.singleton.predictor", @"dcoref\singleton.predictor.ser" },
                { "dcoref.big.gender.number", @"dcoref\gender.data.gz" }
            };

            for (var i = 0; i < modelPaths.GetLength(0); i++)
            {
                props.setProperty(modelPaths[i, 0], Config.GetModel(modelPaths[i, 1]));
            }

            // SUTime expects its rule files as one comma-separated property value.
            var sutimeRules = new[] {
                Config.GetModel(@"sutime\defs.sutime.txt"),
                Config.GetModel(@"sutime\english.holidays.sutime.txt"),
                Config.GetModel(@"sutime\english.sutime.txt")
            };

            props.setProperty("sutime.rules", String.Join(",", sutimeRules));
            props.setProperty("sutime.binders", "0");

            var pipeline = new edu.stanford.nlp.pipeline.StanfordCoreNLP(props);

            // Annotate the sample text and pretty-print the full result.
            var annotation = new Annotation(Text);

            pipeline.annotate(annotation);

            using (var stream = new ByteArrayOutputStream())
            {
                pipeline.prettyPrint(annotation, new PrintWriter(stream));
                Console.WriteLine(stream.toString());
            }

            this.CustomAnnotationPrint(annotation);
        }
        public NlpResult DeserializeInput(StanfordCoreNLP pipeline, NlpResult nlpResult, string stringInput)
        {
            // Annotates the raw text and populates `nlpResult` from the pipeline's
            // JSON output; returns the same NlpResult instance for chaining.
            var annotation = new Annotation(stringInput);

            pipeline.annotate(annotation);

            using (var stream = new ByteArrayOutputStream())
            {
                pipeline.jsonPrint(annotation, new PrintWriter(stream));

                // Materialize the JSON once (the original called stream.toString()
                // twice). `using` disposes the stream, so no explicit close() needed.
                var json = stream.toString();

                _jsonContentProvider.PopulateFromString(nlpResult, json);

                Debug.WriteLine(json);
            }

            return nlpResult;
        }
Example #10
0
        public static String Parse_Data(String sent, StanfordCoreNLP pipeline1)
        {
            // Extracts meaningful words from the user query: runs the supplied
            // CoreNLP pipeline over `sent` and returns the pretty-printed annotation.
            var annotation = new edu.stanford.nlp.pipeline.Annotation(sent);

            pipeline1.annotate(annotation);

            string prettyOutput;

            using (var stream = new ByteArrayOutputStream())
            {
                var writer = new PrintWriter(stream);
                pipeline1.prettyPrint(annotation, writer);
                System.Console.WriteLine(" it's stanford time ");
                prettyOutput = stream.toString();
                stream.close();
            }

            return prettyOutput;
        }
Example #11
0
        public static AnnotationObject Annotate(string content)
        {
            // Runs the shared pipeline over `content`, serializes the annotation to
            // JSON, and deserializes that JSON into an AnnotationObject.
            var annotation = new Annotation(content);

            Pipeline.annotate(annotation);

            using (var stream = new ByteArrayOutputStream())
            {
                Pipeline.jsonPrint(annotation, new PrintWriter(stream));

                // Flatten the JSON onto a single line before deserializing.
                var payload      = stream.toString().Replace("\n", "");
                var deserialized = Newtonsoft.Json.JsonConvert.DeserializeObject <AnnotationObject>(payload);

                stream.close();

                return deserialized;
            }
        }
Example #12
0
        public void CustomAnnotationPrint(Annotation annotation)
        {
            // Walks an annotated document sentence by sentence and prints, for each:
            // the raw sentence, every token's word/POS/NER tags, the constituency
            // parse tree (Penn style), and the collapsed dependency edges.
            Console.WriteLine("-------------");
            Console.WriteLine("Custom print:");
            Console.WriteLine("-------------");
            // Annotation values are keyed by Java class objects, hence the
            // `new ...Annotation().getClass()` lookups used throughout.
            var sentences = (ArrayList)annotation.get(new CoreAnnotations.SentencesAnnotation().getClass());

            foreach (CoreMap sentence in sentences)
            {
                Console.WriteLine("\n\nSentence : '{0}'", sentence);

                var tokens = (ArrayList)sentence.get(new CoreAnnotations.TokensAnnotation().getClass());
                foreach (CoreLabel token in tokens)
                {
                    var word = token.get(new CoreAnnotations.TextAnnotation().getClass());
                    var pos  = token.get(new CoreAnnotations.PartOfSpeechAnnotation().getClass());
                    var ner  = token.get(new CoreAnnotations.NamedEntityTagAnnotation().getClass());
                    Console.WriteLine("{0} \t[pos={1}; ner={2}]", word, pos, ner);
                }

                Console.WriteLine("\nTree:");
                // Penn-treebank-style print of the parse tree, captured via a Java stream.
                var tree = (Tree)sentence.get(new TreeCoreAnnotations.TreeAnnotation().getClass());
                using (var stream = new ByteArrayOutputStream())
                {
                    tree.pennPrint(new PrintWriter(stream));
                    Console.WriteLine("The first sentence parsed is:\n {0}", stream.toString());
                }

                Console.WriteLine("\nDependencies:");
                // Each edge prints as relation(governorWord-index,dependentWord-index).
                var deps = (SemanticGraph)sentence.get(new SemanticGraphCoreAnnotations.CollapsedDependenciesAnnotation().getClass());
                foreach (SemanticGraphEdge edge in deps.edgeListSorted().toArray())
                {
                    var gov = edge.getGovernor();
                    var dep = edge.getDependent();
                    Console.WriteLine(
                        "{0}({1}-{2},{3}-{4})", edge.getRelation(),
                        gov.word(), gov.index(), dep.word(), dep.index());
                }
            }
        }
Example #13
0
        public void ProcessText(string text, string outputPath)
        {
            // Runs the CoreNLP annotators over `text` and writes the pipeline's JSON
            // output to `outputPath`.
            var props = new Properties();

            props.setProperty("annotators", "tokenize, ssplit, pos, lemma, ner, parse");
            props.setProperty("ner.useSUTime", "0");

            // Change the current directory so StanfordCoreNLP can find all the model
            // files automatically; restore it in a finally block so a model-loading
            // failure cannot leave the process in the jar root.
            var curDir = Environment.CurrentDirectory;
            StanfordCoreNLP pipeline;

            Directory.SetCurrentDirectory(stanfordJarRoot);
            try
            {
                pipeline = new StanfordCoreNLP(props);
            }
            finally
            {
                Directory.SetCurrentDirectory(curDir);
            }

            Console.WriteLine("Starting to parse.");
            // Annotation
            var annotation = new Annotation(text);

            pipeline.annotate(annotation);

            Console.WriteLine("Parsing complete.. writing to file.");
            string jsonOutput;

            // `using` disposes (closes) the Java stream; explicit close is redundant.
            using (var stream = new ByteArrayOutputStream())
            {
                pipeline.jsonPrint(annotation, new PrintWriter(stream));
                jsonOutput = stream.toString();
            }

            using (var file = new StreamWriter(outputPath))
            {
                file.WriteLine(jsonOutput);
            }
            Console.WriteLine("Processing complete.");
        }
        public static void Main()
        {
            // Path to the folder with models extracted from `stanford-corenlp-3.6.0-models.jar`
            var jarRoot = @"data";

            // Text for processing
            var text = "Kosgi Santosh sent an email to Stanford University. He didn't get a reply.";

            // Annotation pipeline configuration
            var props = new Properties();

            props.setProperty("annotators", "tokenize, ssplit, pos, lemma, ner, parse, dcoref");
            props.setProperty("ner.useSUTime", "0");

            // Change the current directory so StanfordCoreNLP can find the model
            // files; restore it in a finally block so an exception while loading
            // models cannot leave the process in the models directory.
            var curDir = Environment.CurrentDirectory;
            StanfordCoreNLP pipeline;

            Directory.SetCurrentDirectory(jarRoot);
            try
            {
                pipeline = new StanfordCoreNLP(props);
            }
            finally
            {
                Directory.SetCurrentDirectory(curDir);
            }

            // Annotate the sample text.
            var annotation = new Annotation(text);

            pipeline.annotate(annotation);

            // Result - Pretty Print (`using` disposes/closes the stream).
            using (var stream = new ByteArrayOutputStream())
            {
                pipeline.prettyPrint(annotation, new PrintWriter(stream));
                Console.WriteLine(stream.toString());
            }
        }
Example #15
0
        static void Main(string[] args)
        {
            // Spanish/Portuguese CoreNLP demo: loads the Spanish properties file,
            // annotates two Portuguese documents, and pretty-prints both.
            var jarRoot = @"..\..\..\es\";

            // Short sample document (excerpt from a Público interview with António Costa).
            string sb = "António Costa defende que, na criação da moeda única, houve um \"excesso de voluntarismo político\" e nem todos terão percebido que \"o euro foi o maior bónus à competitividade da economia alemã que a Europa lhe poderia ter oferecido\".";

            var propsFile = Path.Combine(jarRoot, "StanfordCoreNLP-spanish.properties");

            // Annotation pipeline configuration: start from the Spanish properties
            // file, then override selected settings.
            var props = new Properties();

            props.load(new FileReader(propsFile));
            props.put("ner.useSUTime", "0");
            props.put("tokenize.verbose", "true");
            props.setProperty("annotators", "tokenize, ssplit, pos, lemma, ner, depparse, kbp, coref,entitymentions, quote");

            // Change the current directory so StanfordCoreNLP can find the model
            // files; the finally block guarantees the original directory is restored
            // even if pipeline construction or annotation throws (the original only
            // restored it on the success path).
            var curDir = Environment.CurrentDirectory;

            Directory.SetCurrentDirectory(jarRoot);
            try
            {
                var pipeline = new StanfordCoreNLP(props);

                CoreDocument testDoc = new CoreDocument(sb);

                pipeline.annotate(testDoc);

                // Longer multi-paragraph sample document.
                string sxc = "\"O pior erro que poderíamos cometer era, à boleia do crescimento económico, termos a ilusão de que os problemas estruturais da zona euro ficaram resolvidos\", defende António Costa, que deu uma entrevista ao Público que será publicada este domingo e onde o primeiro-ministro diz que a moeda única foi um \"bónus\" para a economia alemã. Numa entrevista em que, a julgar pelo excerto publicado para já, se focou essencialmente em questões europeias, e não na política interna, o primeiro-ministro mostrou-se, também, favorável à introdução de impostos europeus." +
                             "António Costa defende que, na criação da moeda única, houve um \"excesso de voluntarismo político\" e nem todos terão percebido que \"o euro foi o maior bónus à competitividade da economia alemã que a Europa lhe poderia ter oferecido\". Agora, a menos que se tomem medidas de correção das assimetrias, \"a zona euro será mais uma vez confrontada com uma crise como a que vivemos agora\"." +
                             "O objetivo de todos os líderes europeus deve ser evitar que se volte a cometer \"o erro que nos acompanhou desde 2000 até 2011\", isto é, marcar passo nas reformas. E uma das principais ferramentas de que a zona euro necessita é de um orçamento da zona euro destinado a financiar reformas para acelerar a convergência das economias." +
                             "Com a saída do Reino Unido e com a necessidade de investir na segurança, na defesa e na ciência, António Costa defende que \"ou estamos disponíveis para sacrificar a parte do Orçamento afeta às políticas de coesão e à PAC, ou temos de encontrar outras fontes de receita\". Onde? Mais contribuições dos Estados, isto é, \"mais impostos dos portugueses\", ou receitas próprias criadas pela União, nomeadamente através de impostos europeus.";

                CoreDocument testDoc3 = new CoreDocument(sxc);

                pipeline.annotate(testDoc3);

                // Result - Pretty Print. The first block prints testDoc, so its label
                // now says "testDoc" (the original mislabeled it "testDoc3").
                using (var stream = new ByteArrayOutputStream())
                {
                    pipeline.prettyPrint(testDoc.annotation(), new PrintWriter(stream));
                    Console.WriteLine("Stream testDoc " + stream.toString());
                }

                using (var stream = new ByteArrayOutputStream())
                {
                    pipeline.prettyPrint(testDoc3.annotation(), new PrintWriter(stream));
                    Console.WriteLine("Stream testDoc3 " + stream.toString());
                }
            }
            finally
            {
                Directory.SetCurrentDirectory(curDir);
            }
        }
Example #16
0
        private object StanfordNLP(string input)
        {
            // Builds a fully-configured English CoreNLP pipeline (model paths resolved
            // relative to the current working directory), runs it over `input`, and
            // mines the CoNLL-format output for verb/noun tokens to form an intent
            // query and a noun target. Returns an anonymous object:
            // { intent, noun, action_prob, noun_prob }.
            string npath = Directory.GetCurrentDirectory();

            string NLPquery = "";
            string getNoun  = "";
            bool   compound = false;
            var    text     = input;
            // Annotation pipeline configuration
            var props = new Properties();

            props.setProperty("annotators", "tokenize, ssplit, pos, lemma, ner, parse, dcoref");
            props.setProperty("pos.model", npath + @"\edu\stanford\nlp\models\pos-tagger\english-bidirectional\english-bidirectional-distsim.tagger");
            props.setProperty("ner.model", npath + @"\edu\stanford\nlp\models\ner\english.all.3class.distsim.crf.ser.gz");
            props.setProperty("parse.model", npath + @"\edu\stanford\nlp\models\lexparser\englishPCFG.ser.gz");
            props.setProperty("dcoref.demonym", npath + @"\edu\stanford\nlp\models\dcoref\demonyms.txt");
            props.setProperty("dcoref.states", npath + @"\edu\stanford\nlp\models\dcoref\state-abbreviations.txt");
            props.setProperty("dcoref.animate", npath + @"\edu\stanford\nlp\models\dcoref\animate.unigrams.txt");
            props.setProperty("dcoref.inanimate", npath + @"\edu\stanford\nlp\models\dcoref\inanimate.unigrams.txt");
            props.setProperty("dcoref.male", npath + @"\edu\stanford\nlp\models\dcoref\male.unigrams.txt");
            props.setProperty("dcoref.neutral", npath + @"\edu\stanford\nlp\models\dcoref\neutral.unigrams.txt");
            props.setProperty("dcoref.female", npath + @"\edu\stanford\nlp\models\dcoref\female.unigrams.txt");
            props.setProperty("dcoref.plural", npath + @"\edu\stanford\nlp\models\dcoref\plural.unigrams.txt");
            props.setProperty("dcoref.singular", npath + @"\edu\stanford\nlp\models\dcoref\singular.unigrams.txt");
            props.setProperty("dcoref.countries", npath + @"\edu\stanford\nlp\models\dcoref\countries");
            props.setProperty("dcoref.extra.gender", npath + @"\edu\stanford\nlp\models\dcoref\namegender.combine.txt");
            props.setProperty("dcoref.states.provinces", npath + @"\edu\stanford\nlp\models\dcoref\statesandprovinces");
            props.setProperty("dcoref.singleton.predictor", npath + @"\edu\stanford\nlp\models\dcoref\singleton.predictor.ser");
            props.setProperty("dcoref.big.gender.number", npath + @"\edu\stanford\nlp\models\dcoref\gender.map.ser.gz");
            props.setProperty("sutime.rules", npath + @"\edu\stanford\nlp\models\sutime\defs.sutime.txt, " + npath + @"\edu\stanford\nlp\models\sutime\english.holidays.sutime.txt, " + npath + @"\edu\stanford\nlp\models\sutime\english.sutime.txt");
            props.setProperty("sutime.binders", "0");
            props.setProperty("ner.useSUTime", "0");
            var pipeline = new StanfordCoreNLP(props);

            // Annotation
            var annotation = new Annotation(text);

            pipeline.annotate(annotation);

            List <string> NLPDATA = new List <string>();

            using (var stream = new ByteArrayOutputStream())
            {
                //  pipeline.prettyPrint(annotation, new PrintWriter(stream));
                pipeline.conllPrint(annotation, new PrintWriter(stream));

                string output = stream.toString();
                Console.WriteLine(output);

                // Split the CoNLL dump into lines, then split each line on runs of
                // non-alphanumeric characters and pad the tokens into a fixed
                // 10-column row.
                // NOTE(review): a line producing more than 10 tokens would overflow
                // the 10-slot row (IndexOutOfRangeException) — confirm the CoNLL
                // output never exceeds 10 fields after this split.
                string[] lines = Regex.Split(output, "[\r\n]+");
                // Console.WriteLine(lines.Length);
                string[][] wordMatrix = new string[lines.Length][];
                for (var i = 0; i < wordMatrix.Length; i++)
                {
                    wordMatrix[i] = new string[10];
                    string[] words = Regex.Split(lines[i], "[^a-zA-Z0-9]+");
                    // Console.WriteLine(words.Length);
                    for (int ii = 0; ii < words.Length; ii++)
                    {
                        wordMatrix[i][ii] = words[ii];
                    }
                }

                // Scan for POS tags of interest. wordMatrix[i][ii - 1] is presumably
                // the word preceding its POS tag and wordMatrix[i][6] the dependency
                // relation column — TODO confirm against the CoNLL column layout.
                // NOTE(review): ii - 1 is negative when a tag lands in column 0,
                // which would throw IndexOutOfRangeException.
                for (int i = 0; i < lines.Length; i++)
                {
                    for (int ii = 0; ii < wordMatrix[i].Length; ii++)
                    {
                        if (wordMatrix[i][ii] == "VB" || wordMatrix[i][ii] == "RP" || wordMatrix[i][ii] == "NN" || wordMatrix[i][ii] == "NNP")
                        {
                            NLPDATA.Add(wordMatrix[i][ii] + " " + wordMatrix[i][ii - 1] + " " + wordMatrix[i][6]);
                            NLPquery = NLPquery + " " + wordMatrix[i][ii - 1];
                        }
                        if (wordMatrix[i][ii] == "NN" || wordMatrix[i][ii] == "NNP" || wordMatrix[i][ii] == "NNS")
                        {
                            // Prefer compound/xcomp nouns; otherwise fall back to
                            // dep/dobj plain nouns when no compound was seen.
                            if (wordMatrix[i][6] == "compound" || wordMatrix[i][6] == "xcomp")
                            {
                                getNoun  = wordMatrix[i][ii - 1];
                                compound = true;
                            }
                            if (wordMatrix[i][6] == "dep" && compound != true && wordMatrix[i][ii] == "NN")
                            {
                                getNoun = wordMatrix[i][ii - 1];
                            }
                            if (wordMatrix[i][6] == "dobj" && compound != true && wordMatrix[i][ii] == "NN")
                            {
                                getNoun = wordMatrix[i][ii - 1];
                            }
                        }
                    }
                }
                stream.close();
            }
            // Score the extracted intent query and noun with the NLP helper.
            NLP nLP = new NLP();
            string action = nLP.getAnswer(NLPquery.Trim());
            string nprob  = nLP.getProbability(getNoun.Trim()).ToString();
            string aprob  = nLP.getProbability(NLPquery.Trim()).ToString();

            // If the answer collapses to the noun itself, zero out both confidences.
            if (action == getNoun)
            {
                nprob = "0";
                aprob = "0";
            }
            compound = false;
            return(new { intent = action, noun = getNoun, action_prob = aprob, noun_prob = nprob });
        }