static void Main(string[] args)
{
    string[] files = Directory.GetFiles(@"..\..\..\", "*.txt");
    StreamWriter sw = new StreamWriter(@"..\..\..\output\Html.txt");

    // Load the sentence model once, instead of re-opening it for every input line.
    java.io.InputStream modelIn = new java.io.FileInputStream(string.Format(@"{0}\en-sent.bin", @"..\Debug"));
    SentenceModel model = new SentenceModel(modelIn);
    SentenceDetector detector = new SentenceDetectorME(model);
    modelIn.close();

    foreach (string file in files)
    {
        using (StreamReader sr = new StreamReader(file))
        {
            while (sr.Peek() != -1)
            {
                string line = sr.ReadLine();
                string[] sents = detector.sentDetect(line);
                foreach (var sent in sents)
                {
                    sw.WriteLine(sent);
                }
            }
        }
        sw.Flush();
    }
    sw.Close();
}
private static POSTaggerME preparePOSTagger()
{
    using (var modelInputStream = new java.io.FileInputStream(@"c:\projects\OTAutocompleteDSL\dep\en-pos-maxent.bin"))
    {
        return new POSTaggerME(new POSModel(modelInputStream)); // load the model and create the tagger
    }
}
static Sentenizer()
{
    var modelFile = HttpContext.Current.Server.MapPath("~/Files/TextAnalytics/en-sentence-model-dragon.bin");
    ModelIn = new java.io.FileInputStream(modelFile);
    Model = new SentenceModel(ModelIn);
}
public void Chunker()
{
    using var modelIn = new java.io.FileInputStream(GetModel("en-chunker.bin"));
    var model = new opennlp.tools.chunker.ChunkerModel(modelIn);
    var chunker = new opennlp.tools.chunker.ChunkerME(model);

    var sent = new[]
    {
        "Rockwell", "International", "Corp.", "'s", "Tulsa", "unit", "said", "it", "signed", "a",
        "tentative", "agreement", "extending", "its", "contract", "with", "Boeing", "Co.", "to",
        "provide", "structural", "parts", "for", "Boeing", "'s", "747", "jetliners", "."
    };
    var pos = new[]
    {
        "NNP", "NNP", "NNP", "POS", "NNP", "NN", "VBD", "PRP", "VBD", "DT",
        "JJ", "NN", "VBG", "PRP$", "NN", "IN", "NNP", "NNP", "TO",
        "VB", "JJ", "NNS", "IN", "NNP", "POS", "CD", "NNS", "."
    };

    var tags = chunker.chunk(sent, pos);
    System.Console.WriteLine(string.Join(";", tags));
    Assert.AreEqual(28, tags.Length);

    var probs = chunker.probs();
    System.Console.WriteLine(string.Join(";", probs));
    Assert.AreEqual(28, probs.Length);
}
static Sentenizer()
{
    var modelFile = ConfigurationManager.AppSettings["ModelSentenizer"] ?? string.Empty;
    ModelIn = new java.io.FileInputStream(modelFile);
    Model = new SentenceModel(ModelIn);
}
private static TokenizerME prepareTokenizer()
{
    using (var tokenInputStream = new java.io.FileInputStream(@"c:\projects\OTAutocompleteDSL\dep\en-token.bin"))
    {
        var tokenModel = new opennlp.tools.tokenize.TokenizerModel(tokenInputStream); // load the token model
        return new opennlp.tools.tokenize.TokenizerME(tokenModel);                    // create the tokenizer
    }
}
private static opennlp.tools.parser.ParserModel OpenJavaModel(string fileName)
{
    java.io.FileInputStream inputStream = null;
    try
    {
        inputStream = OpenNLP.OpenInputStream(fileName);
        return new opennlp.tools.parser.ParserModel(inputStream);
    }
    finally
    {
        inputStream?.close();
    }
}
private void LoadParser()
{
    if (!alreadyLoadParser)
    {
        java.io.FileInputStream modelInpStream = new java.io.FileInputStream("Resources\\en-parser-chunking.bin");
        ParserModel parserModel = new ParserModel(modelInpStream);
        parser = ParserFactory.create(parserModel);
        alreadyLoadParser = true;
    }
}
private void LoadTokenizer()
{
    // Guard with the tokenizer flag (the original checked the tagger flag by mistake).
    if (!alreadyLoadTokenizer)
    {
        java.io.FileInputStream modelInpStream = new java.io.FileInputStream("Resources\\en-token.bin");
        TokenizerModel tokenizerModel = new TokenizerModel(modelInpStream);
        tokenizer = new TokenizerME(tokenizerModel);
        alreadyLoadTokenizer = true;
    }
}
private void LoadSentenceDetector()
{
    if (!alreadyLoadSentenceDetector)
    {
        java.io.FileInputStream modelInpStream = new java.io.FileInputStream("Resources\\en-sent.bin");
        SentenceModel sentenceModel = new SentenceModel(modelInpStream);
        sentenceDetector = new SentenceDetectorME(sentenceModel);
        alreadyLoadSentenceDetector = true;
    }
}
private void LoadTagger()
{
    // Guard with the tagger flag (the original checked the tokenizer flag by mistake).
    if (!alreadyLoadTagger)
    {
        java.io.FileInputStream modelInpStream = new java.io.FileInputStream("Resources\\en-pos-maxent.bin");
        POSModel posModel = new POSModel(modelInpStream);
        tagger = new POSTaggerME(posModel);
        alreadyLoadTagger = true;
    }
}
private void LoadChunker()
{
    if (!alreadyLoadChunker)
    {
        java.io.FileInputStream modelInpStream = new java.io.FileInputStream("Resources\\en-chunker.bin");
        ChunkerModel chunkerModel = new ChunkerModel(modelInpStream);
        chunker = new ChunkerME(chunkerModel);
        alreadyLoadChunker = true;
    }
}
public void Tokenize()
{
    var modelStream = new java.io.FileInputStream("../../Models/en-token.bin");
    var model = new TokenizerModel(modelStream);
    var tokenizer = new TokenizerME(model);
    var txt = File.ReadAllText(@"c:\dev\d-mill\uspe\Data\uspe-sentenced.txt");
    var tokens = tokenizer.tokenize(txt);
}
public NLPTool(string SSModel, string TKModel, string POSModel, string CKModel, string PersonModel,
               string OrgModel, string LocModel, string DateModel, string MoneyModel, string PercentageModel,
               string TimeModel, string ParseModel, string MaltParseModel, string LemmatizeDict)
{
    nlp_models = new NLPToolBox(SSModel, TKModel, POSModel, CKModel, PersonModel, OrgModel, LocModel,
                                DateModel, MoneyModel, PercentageModel, TimeModel, ParseModel, MaltParseModel);
    java.io.InputStream input_stream = new java.io.FileInputStream(LemmatizeDict);
    lemmatizer = new SimpleLemmatizer(input_stream);
    input_stream.close();
}
static Tokenizer()
{
    var modelFile = HttpContext.Current.Server.MapPath("~/Files/TextAnalytics/en-token.bin");
    if (!File.Exists(modelFile))
    {
        throw new FileNotFoundException("Unable to find tokenizer model file at " + modelFile);
    }
    ModelIn = new java.io.FileInputStream(modelFile);
    Model = new TokenizerModel(ModelIn);
}
static void Main(string[] args)
{
    java.io.InputStream model_src = new java.io.FileInputStream(@"..\..\..\..\en-sent.bin");
    SentenceModel smodel = new SentenceModel(model_src);
    detector = new SentenceDetectorME(smodel);
    foreach (string src in Directory.GetFiles(@"..\..\..\..\Dataset"))
    {
        lab5(src);
    }
}
public void SentenceDetection()
{
    using var modelIn = new java.io.FileInputStream(GetModel("en-sent.bin"));
    var model = new opennlp.tools.sentdetect.SentenceModel(modelIn);
    var sentenceDetector = new opennlp.tools.sentdetect.SentenceDetectorME(model);
    var sentences = sentenceDetector.sentDetect(" First sentence. Second sentence. ");
    System.Console.WriteLine(string.Join(";", sentences));
    Assert.AreEqual(2, sentences.Length);
}
public void Tokenization()
{
    using var modelIn = new java.io.FileInputStream(GetModel("en-token.bin"));
    var model = new opennlp.tools.tokenize.TokenizerModel(modelIn);
    var tokenizer = new opennlp.tools.tokenize.TokenizerME(model);
    var tokens = tokenizer.tokenize("An input sample sentence.");
    System.Console.WriteLine(string.Join(";", tokens));
    Assert.AreEqual(5, tokens.Length);
}
//
// DO NOT USE THESE TESTS AS SAMPLES TO BUILD YOUR STUFF!
//
// I use some things here that are not needed in a "real" implementation.
//
private static opennlp.tools.tokenize.TokenizerModel OpenJavaModel(string fileName)
{
    java.io.FileInputStream inputStream = null;
    try
    {
        inputStream = OpenNLP.OpenInputStream(fileName);
        return new opennlp.tools.tokenize.TokenizerModel(inputStream);
    }
    finally
    {
        if (inputStream != null)
        {
            inputStream.close();
        }
    }
}
private void UnzipFile(string zipFileName)
{
    java.io.FileInputStream fis = null;
    java.util.zip.ZipInputStream zis = null;
    try
    {
        string fileName = "EQ" + dateTimePicker1.Value.Day.ToString("00")
                               + dateTimePicker1.Value.Month.ToString("00")
                               + dateTimePicker1.Value.Year.ToString().Replace("20", "") + ".CSV";
        sbyte[] buf = new sbyte[1024];
        int len;

        fis = new java.io.FileInputStream(zipFileName);
        zis = new java.util.zip.ZipInputStream(fis);
        java.util.zip.ZipEntry ze;
        while ((ze = zis.getNextEntry()) != null)
        {
            if (fileName == ze.getName())
            {
                // File name format in the zip file is: folder/subfolder/filename.
                // Create the containing directory if it does not exist yet.
                int index = fileName.LastIndexOf('/');
                if (index > 1)
                {
                    string folder = fileName.Substring(0, index);
                    DirectoryInfo di = new DirectoryInfo(folder);
                    if (!di.Exists)
                    {
                        di.Create();
                    }
                }
                java.io.FileOutputStream fos = new java.io.FileOutputStream(fileName);
                while ((len = zis.read(buf)) >= 0)
                {
                    fos.write(buf, 0, len);
                }
                fos.close();
            }
        }
    }
    catch (Exception ex)
    {
        MessageBox.Show("Error" + ex.Message);
    }
    finally
    {
        // Close everything, even if an exception was thrown above.
        if (zis != null) zis.close();
        if (fis != null) fis.close();
    }
}
private void Initial()
{
    var basedir = (string)GlobalParameter.Get(DefaultParameter.Field.opennlp_model_dir);

    var modelInputStream = new java.io.FileInputStream(Path.Combine(basedir, "en-ner-location.bin")); // load the name model into a stream
    var model = new opennlp.tools.namefind.TokenNameFinderModel(modelInputStream);                    // load the model
    locationNameFinder = new opennlp.tools.namefind.NameFinderME(model);                              // create the namefinder

    modelInputStream = new java.io.FileInputStream(Path.Combine(basedir, "en-ner-person.bin"));
    model = new opennlp.tools.namefind.TokenNameFinderModel(modelInputStream);
    personNameFinder = new opennlp.tools.namefind.NameFinderME(model);

    modelInputStream = new java.io.FileInputStream(Path.Combine(basedir, "en-ner-organization.bin"));
    model = new opennlp.tools.namefind.TokenNameFinderModel(modelInputStream);
    organizationNameFinder = new opennlp.tools.namefind.NameFinderME(model);
}
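// A minimal usage sketch, not part of the original snippet: it assumes the finders loaded above, an
// already-tokenized sentence, and System.Linq / System.Collections.Generic imports. It only illustrates
// how NameFinderME.find() returns Span objects over token indices; FindPersons is a hypothetical helper.
private List<string> FindPersons(string[] tokens)
{
    var results = new List<string>();
    // find() returns Span objects holding the token index range of each detected name.
    opennlp.tools.util.Span[] spans = personNameFinder.find(tokens);
    foreach (var span in spans)
    {
        results.Add(string.Join(" ", tokens.Skip(span.getStart()).Take(span.getEnd() - span.getStart())));
    }
    // Clear adaptive data between documents so earlier matches do not bias later ones.
    personNameFinder.clearAdaptiveData();
    return results;
}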
/// <summary>Obtains the audio file format of the File provided.</summary>
/// <remarks>
/// Obtains the audio file format of the File provided.
/// The File must point to valid audio file data.
/// </remarks>
/// <param name="file">
/// the File from which file format information should be extracted.
/// </param>
/// <returns>an AudioFileFormat object describing the audio file format.</returns>
/// <exception>
/// UnsupportedAudioFileException
/// if the File does not point to valid audio file data recognized by the system.
/// </exception>
/// <exception>
/// IOException
/// if an I/O exception occurs.
/// </exception>
/// <exception cref="javax.sound.sampled.UnsupportedAudioFileException"></exception>
/// <exception cref="System.IO.IOException"></exception>
public override javax.sound.sampled.AudioFileFormat getAudioFileFormat(java.io.File file)
{
    java.io.InputStream inputStream = null;
    try
    {
        inputStream = new java.io.FileInputStream(file);
        return getAudioFileFormat(inputStream, (int)file.length());
    }
    finally
    {
        // Guard against a NullReferenceException if the FileInputStream constructor throws.
        if (inputStream != null)
        {
            inputStream.close();
        }
    }
}
public static Classifier TryLoadClassifier(string modelFileName)
{
    if (!System.IO.File.Exists(modelFileName))
    {
        byte[] d = Instance.LoadModelData(modelFileName);
        if (d != null)
        {
            System.IO.File.WriteAllBytes(modelFileName, d);
        }
    }
    if (System.IO.File.Exists(modelFileName))
    {
        java.io.FileInputStream fis = null;
        try
        {
            Classifier cls = null;
            if (modelFileName.Contains("MLEA111"))
            {
                cls = Feng.Windows.Utils.SerializeHelper.Deserialize<MincostLiblinearClassifier>(modelFileName);
            }
            else
            {
                //fis = new java.io.FileInputStream(modelFileName);
                //var ois = new java.io.ObjectInputStream(fis);
                //var cls = (Classifier)ois.readObject();
                //ois.close();
                //fis.close();
                cls = (Classifier)weka.core.SerializationHelper.read(modelFileName);
            }
            //if (cls.GetType() != ClassifierType)
            //{
            //    throw new ArgumentException("Classifier Type is wrong!");
            //}
            return cls;
        }
        catch (Exception)
        {
            if (fis != null)
            {
                fis.close();
            }
            System.IO.File.Delete(modelFileName);
            Instance.DeleteModelData(modelFileName);
        }
    }
    return null;
}
public void NameFinder()
{
    using var modelIn = new java.io.FileInputStream(GetModel("en-ner-person.bin"));
    var model = new opennlp.tools.namefind.TokenNameFinderModel(modelIn);
    var nameFinder = new opennlp.tools.namefind.NameFinderME(model);
    var sentence = new[] { "Pierre", "Vinken", "is", "61", "years", "old", "." };
    var nameSpans = nameFinder.find(sentence);
    System.Console.WriteLine(string.Join(";", nameSpans.Select(x => x.toString())));
    Assert.AreEqual(1, nameSpans.Length);
}
static Tokenizer()
{
    var modelFile = ConfigurationManager.AppSettings["ModelTokenizer"] ?? string.Empty;
    if (string.IsNullOrWhiteSpace(modelFile))
        throw new Exception("ModelTokenizer setting not defined in App.Config");
    modelFile = Path.Combine(AppDomain.CurrentDomain.BaseDirectory, modelFile);
    if (!File.Exists(modelFile))
        throw new FileNotFoundException("Unable to find tokenizer model file at " + modelFile);
    ModelIn = new java.io.FileInputStream(modelFile);
    Model = new TokenizerModel(ModelIn);
}
public static ZipFile UpdateZipFile(ZipFile file, FilterEntryMethod filter, string[] newFiles)
{
    string prev = file.getName();
    string tmp = System.IO.Path.GetTempFileName();
    ZipOutputStream to = new ZipOutputStream(new java.io.FileOutputStream(tmp));
    try
    {
        CopyEntries(file, to, filter);
        // add new entries here
        if (newFiles != null)
        {
            foreach (string f in newFiles)
            {
                ZipEntry z = new ZipEntry(f.Remove(0, System.IO.Path.GetPathRoot(f).Length));
                z.setMethod(ZipEntry.DEFLATED);
                to.putNextEntry(z);
                try
                {
                    java.io.FileInputStream s = new java.io.FileInputStream(f);
                    try
                    {
                        CopyStream(s, to);
                    }
                    finally
                    {
                        s.close();
                    }
                }
                finally
                {
                    to.closeEntry();
                }
            }
        }
    }
    finally
    {
        to.close();
    }
    file.close();
    // now replace the old file with the new one
    System.IO.File.Copy(tmp, prev, true);
    System.IO.File.Delete(tmp);
    return new ZipFile(prev);
}
static void Main(string[] args)
{
    // set up logging
    clog = LoggingConfigurator.configureLogging();
    clog.Info("Hello from Common Logging");

    string projectDir = System.IO.Directory.GetParent(
        System.IO.Directory.GetParent(
            Environment.CurrentDirectory.ToString()).ToString()).ToString() + "\\";

    // the docx 'template'
    String input_DOCX = projectDir + @"src\samples\resources\ContentControlBind\binding-simple.docx";
    // the instance data
    String input_XML = projectDir + @"src\samples\resources\ContentControlBind\binding-simple-data.xml";
    // resulting docx
    String OUTPUT_DOCX = projectDir + @"OUT_ContentControlsMergeXML.docx";

    // Configure to find docx4j.properties
    // .. add as URL the dir containing docx4j.properties (not the file itself!)
    Plutext.PropertiesConfigurator.setDocx4jPropertiesDir(projectDir + @"src\samples\resources\");

    // Load input_template.docx
    WordprocessingMLPackage wordMLPackage = org.docx4j.Docx4J.load(new java.io.File(input_DOCX));

    // Open the xml stream
    java.io.FileInputStream xmlStream = new java.io.FileInputStream(new java.io.File(input_XML));

    // Do the binding:
    // FLAG_NONE means that all the steps of the binding will be done,
    // otherwise you could pass a combination of the following flags:
    // FLAG_BIND_INSERT_XML: inject the passed XML into the document
    // FLAG_BIND_BIND_XML: bind the document and the xml (including any OpenDope handling)
    // FLAG_BIND_REMOVE_SDT: remove the content controls from the document (only the content remains)
    // FLAG_BIND_REMOVE_XML: remove the custom xml parts from the document
    //Docx4J.bind(wordMLPackage, xmlStream, Docx4J.FLAG_NONE);

    // If a document doesn't include the OpenDope definitions, e.g. the XPathPart,
    // then the only thing you can do is insert the xml;
    // the example document binding-simple.docx doesn't have an XPathPart...
    Docx4J.bind(wordMLPackage, xmlStream, Docx4J.FLAG_BIND_INSERT_XML & Docx4J.FLAG_BIND_BIND_XML);

    // Save the document
    Docx4J.save(wordMLPackage, new java.io.File(OUTPUT_DOCX), Docx4J.FLAG_NONE);
    clog.Info("Saved: " + OUTPUT_DOCX);
}
public static object readObjectFromFile(string fileName)
{
    try
    {
        java.io.File f = new java.io.File(fileName);
        f.setReadable(true);
        java.io.FileInputStream streamIn = new java.io.FileInputStream(f);
        java.io.ObjectInputStream ii = new java.io.ObjectInputStream(streamIn);
        object obj = ii.readObject();
        streamIn.close();
        return obj;
    }
    catch (System.Exception)
    {
        return null;
    }
}
public void Parser()
{
    using var modelIn = new java.io.FileInputStream(GetModel("en-parser-chunking.bin"));
    var model = new opennlp.tools.parser.ParserModel(modelIn);
    var parser = opennlp.tools.parser.ParserFactory.create(model);
    var sentence = "The quick brown fox jumps over the lazy dog .";
    var topParses = opennlp.tools.cmdline.parser.ParserTool.parseLine(sentence, parser, 1);
    System.Console.WriteLine(string.Join(";", topParses.Select(x => x.toString())));
    Assert.AreEqual(1, topParses.Length);
    var tree = topParses[0];
    Assert.IsNull(tree.getLabel());
}
static Tokenizer()
{
    var modelFile = ConfigurationManager.AppSettings["ModelTokenizer"] ?? string.Empty;
    if (string.IsNullOrWhiteSpace(modelFile))
    {
        throw new Exception("ModelTokenizer setting not defined in App.Config");
    }
    modelFile = Path.Combine(AppDomain.CurrentDomain.BaseDirectory, modelFile);
    if (!File.Exists(modelFile))
    {
        throw new FileNotFoundException("Unable to find tokenizer model file at " + modelFile);
    }
    ModelIn = new java.io.FileInputStream(modelFile);
    Model = new TokenizerModel(ModelIn);
}
public void SplitSentences()
{
    var txt = File.ReadAllText(@"c:\dev\d-mill\uspe\Data\uspe-1.txt");
    txt = Regex.Replace(txt, "\\s+", " ");
    txt = Regex.Replace(txt, "\\r\\n", "");
    txt = Regex.Replace(txt, "MR.\\s+", "MR.");
    var modelStream = new java.io.FileInputStream("../../Models/en-sent.bin");
    var model = new SentenceModel(modelStream);
    var detector = new SentenceDetectorME(model);
    var sentences = detector.sentDetect(txt);
    File.WriteAllLines(@"c:\dev\d-mill\uspe\Data\uspe-sentenced.txt", sentences);
}
public static void Main(string[] args)
{
    DirectoryInfo folder = new DirectoryInfo(@"..\..\..\..\..\Dataset");

    // Load the sentence model once instead of re-reading it for every file.
    java.io.InputStream modelIn = new java.io.FileInputStream(@"..\..\..\..\..\en-sent.bin");
    SentenceModel smodel = new SentenceModel(modelIn);
    SentenceDetector detector = new SentenceDetectorME(smodel);
    modelIn.close();

    foreach (var fname in folder.GetFiles())
    {
        String line = File.ReadAllText(fname.FullName);
        string[] sents = detector.sentDetect(line);
        using (StreamWriter sw = new StreamWriter(fname.FullName.Replace(fname.FullName.Substring(fname.FullName.Length - 3), "rtf")))
        {
            foreach (var sent in sents)
            {
                sw.WriteLine(sent);
            }
        }
    }
}
/// <summary>
/// Return the OpenNLP analyzer given its model type (M), the type of the analyzer (T), the filename
/// of the model (i.e. en-maxent.bin) and a path to where the models are located (ModelsPath).
/// </summary>
public T ResolveOpenNlpTool<M, T>(string modelPath)
    where T : class
    where M : class
{
    var modelStream = new java.io.FileInputStream(Path.Combine(ModelsPath, modelPath));
    M model;
    try
    {
        model = (M)Activator.CreateInstance(typeof(M), modelStream);
    }
    finally
    {
        if (modelStream != null)
        {
            modelStream.close();
        }
    }
    return (T)Activator.CreateInstance(typeof(T), model);
}
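// A usage sketch, not from the original source: assuming ModelsPath points at a folder with the standard
// OpenNLP model binaries, the resolver pairs each *Model type with its matching analyzer type. The
// CreateTokenizer/CreateTagger wrappers below are hypothetical helpers added for illustration only.
public opennlp.tools.tokenize.TokenizerME CreateTokenizer()
{
    return ResolveOpenNlpTool<opennlp.tools.tokenize.TokenizerModel,
                              opennlp.tools.tokenize.TokenizerME>("en-token.bin");
}

public opennlp.tools.postag.POSTaggerME CreateTagger()
{
    return ResolveOpenNlpTool<opennlp.tools.postag.POSModel,
                              opennlp.tools.postag.POSTaggerME>("en-pos-maxent.bin");
}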
public NLP()
{
    // loading sentence detector model
    java.io.FileInputStream modelInpStream = new java.io.FileInputStream("Resources\\en-sent.bin");
    SentenceModel sentenceModel = new SentenceModel(modelInpStream);
    sentenceDetector = new SentenceDetectorME(sentenceModel);

    // loading tokenizer model
    modelInpStream = new java.io.FileInputStream("Resources\\en-token.bin");
    TokenizerModel tokenizerModel = new TokenizerModel(modelInpStream);
    tokenizer = new TokenizerME(tokenizerModel);

    // loading part-of-speech tagger model
    modelInpStream = new java.io.FileInputStream("Resources\\en-pos-maxent.bin");
    POSModel posModel = new POSModel(modelInpStream);
    tagger = new POSTaggerME(posModel);

    // loading chunker model
    modelInpStream = new java.io.FileInputStream("Resources\\en-chunker.bin");
    ChunkerModel chunkerModel = new ChunkerModel(modelInpStream);
    chunker = new ChunkerME(chunkerModel);

    // loading parser model
    modelInpStream = new java.io.FileInputStream("Resources\\en-parser-chunking.bin");
    ParserModel parserModel = new ParserModel(modelInpStream);
    parser = ParserFactory.create(parserModel);

    // loading stop words list
    StreamReader sr = new StreamReader("Resources\\english.stop.txt");
    string line;
    while ((line = sr.ReadLine()) != null)
    {
        stopwords.Add(Stemming(line));
        stopwords.Add(line);
    }
}
public void PartOfSpeechTagger()
{
    using var modelIn = new java.io.FileInputStream(GetModel("en-pos-maxent.bin"));
    var model = new opennlp.tools.postag.POSModel(modelIn);
    var tagger = new opennlp.tools.postag.POSTaggerME(model);
    var sentence = new[] { "Most", "large", "cities", "in", "the", "US", "had", "morning", "and", "afternoon", "newspapers", "." };
    var tags = tagger.tag(sentence);
    System.Console.WriteLine(string.Join(";", tags));
    Assert.AreEqual(12, tags.Length);
    var probs = tagger.probs();
    System.Console.WriteLine(string.Join(";", probs));
    Assert.AreEqual(12, probs.Length);
}
static void Main(string[] args)
{
    StreamWriter sw = new StreamWriter(@"..\..\Data\result.txt");
    StreamReader sr = new StreamReader(@"..\..\Data\data.txt");

    // Load the sentence model once instead of re-reading it for every input line.
    java.io.InputStream modelIn = new java.io.FileInputStream("en-sent.bin");
    SentenceModel smodel = new SentenceModel(modelIn);
    SentenceDetector detector = new SentenceDetectorME(smodel);
    modelIn.close();

    while (sr.Peek() != -1)
    {
        string line = sr.ReadLine();
        string[] sents = detector.sentDetect(line);
        foreach (var sent in sents)
        {
            sw.WriteLine(sent);
            sw.WriteLine();
        }
        sw.Flush();
    }
    sr.Close();
    sw.Close();
}
private static NameFinderME prepareTimeFinder()
{
    var modelInputStream = new java.io.FileInputStream(@"c:\projects\OTAutocompleteDSL\dep\en-ner-time.bin"); // load the name model into a stream
    var model = new TokenNameFinderModel(modelInputStream);                                                   // load the model
    return new NameFinderME(model);                                                                           // create the namefinder
}

private static opennlp.tools.namefind.NameFinderME prepareNameFinder()
{
    var modelInputStream = new java.io.FileInputStream(@"c:\projects\OTAutocompleteDSL\dep\en-ner-location.bin"); // load the name model into a stream
    var model = new opennlp.tools.namefind.TokenNameFinderModel(modelInputStream);                                // load the model
    return new opennlp.tools.namefind.NameFinderME(model);                                                        // create the namefinder
}

private opennlp.tools.sentdetect.SentenceDetectorME prepareSentenceDetector()
{
    java.io.FileInputStream sentModelStream = new java.io.FileInputStream(sentenceModelPath);                       // load the sentence model into a stream
    opennlp.tools.sentdetect.SentenceModel sentModel = new opennlp.tools.sentdetect.SentenceModel(sentModelStream); // load the model
    return new opennlp.tools.sentdetect.SentenceDetectorME(sentModel);                                              // create the sentence detector
}

private opennlp.tools.namefind.NameFinderME prepareNameFinder()
{
    java.io.FileInputStream modelInputStream = new java.io.FileInputStream(nameFinderModelPath);                            // load the name model into a stream
    opennlp.tools.namefind.TokenNameFinderModel model = new opennlp.tools.namefind.TokenNameFinderModel(modelInputStream);  // load the model
    return new opennlp.tools.namefind.NameFinderME(model);                                                                  // create the namefinder
}
private SSLSocketFactory getSSLSocketFactory()
{
    SSLSocketFactory factory = null;
    try
    {
        // reading the keyStore path and password from the environment properties
        string keyStorePath = java.lang.System.getProperty("javax.net.ssl.keyStore");
        java.io.FileInputStream keyStoreStream = null;
        if (keyStorePath != null)
        {
            java.io.File file = new java.io.File(keyStorePath);
            if (file.exists())
                keyStoreStream = new java.io.FileInputStream(file);
            else
                keyStoreStream = searchDefaultCacerts();
        }
        else
        {
            keyStoreStream = searchDefaultCacerts();
        }

        string keyStorePassWord = java.lang.System.getProperty("javax.net.ssl.keyStorePassword");
        if (keyStorePassWord == null)
            keyStorePassWord = "******";
        char[] passphrase = keyStorePassWord.ToCharArray();

        // initiating SSLContext
        SSLContext ctx = SSLContext.getInstance("TLS");
        KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
        TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
        KeyStore ks = KeyStore.getInstance("JKS");
        if (keyStoreStream != null)
            ks.load(keyStoreStream, passphrase);
        else
            ks.load(null, null);
        kmf.init(ks, passphrase);
        tmf.init(ks);
        ctx.init(kmf.getKeyManagers(), tmf.getTrustManagers(), null);
        factory = ctx.getSocketFactory();
    }
    catch (Exception e)
    {
        factory = null;
#if DEBUG
        Console.WriteLine("Can't get SSL Socket Factory, the exception is {0}, {1}", e.GetType(), e.Message);
#endif
    }
    return factory;
}
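// A usage sketch, not part of the original: assuming the same IKVM-mapped javax.net classes used above,
// the returned factory would typically be attached to an HTTPS connection before opening it.
// OpenSecureStream is a hypothetical helper added only for illustration.
private java.io.InputStream OpenSecureStream(string url)
{
    SSLSocketFactory factory = getSSLSocketFactory();
    var connection = (javax.net.ssl.HttpsURLConnection)new java.net.URL(url).openConnection();
    if (factory != null)
    {
        connection.setSSLSocketFactory(factory); // use the keystore-backed factory instead of the default one
    }
    return connection.getInputStream();
}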
/// <summary>Obtains an audio input stream from the File provided.</summary>
/// <remarks>
/// Obtains an audio input stream from the File provided.
/// The File must point to valid audio file data.
/// </remarks>
/// <param name="file">the File for which the AudioInputStream should be constructed.</param>
/// <returns>
/// an AudioInputStream object based on the audio file data pointed to by the File.
/// </returns>
/// <exception>
/// UnsupportedAudioFileException
/// if the File does not point to valid audio file data recognized by the system.
/// </exception>
/// <exception>
/// IOException
/// if an I/O exception occurs.
/// </exception>
/// <exception cref="javax.sound.sampled.UnsupportedAudioFileException"></exception>
/// <exception cref="System.IO.IOException"></exception>
public override javax.sound.sampled.AudioInputStream getAudioInputStream(java.io.File file)
{
    java.io.InputStream inputStream = new java.io.FileInputStream(file);
    try
    {
        return getAudioInputStream(inputStream, (int)file.length());
    }
    catch (javax.sound.sampled.UnsupportedAudioFileException)
    {
        // Close the stream before rethrowing, since ownership never transferred to the caller.
        inputStream.close();
        throw;
    }
    catch (System.IO.IOException)
    {
        inputStream.close();
        throw;
    }
}
static void Main(string[] args)
{
    string projectDir = System.IO.Directory.GetParent(
        System.IO.Directory.GetParent(
            Environment.CurrentDirectory.ToString()).ToString()).ToString() + "\\";

    // the docx 'template'
    String input_DOCX = projectDir + @"src\samples\resources\ContentControlBind\binding-simple.docx";
    // the instance data
    String input_XML = projectDir + @"src\samples\resources\ContentControlBind\binding-simple-data.xml";
    // resulting docx
    String OUTPUT_DOCX = projectDir + @"OUT_ContentControlsMergeXML.docx";

    // Programmatically configure Common Logging
    // (alternatively, you could do it declaratively in app.config)
    NameValueCollection commonLoggingproperties = new NameValueCollection();
    commonLoggingproperties["showDateTime"] = "false";
    commonLoggingproperties["level"] = "INFO";
    LogManager.Adapter = new Common.Logging.Simple.ConsoleOutLoggerFactoryAdapter(commonLoggingproperties);
    ILog log = LogManager.GetCurrentClassLogger();
    log.Info("Hello from Common Logging");

    // Necessary, if slf4j-api and slf4j-NetCommonLogging are separate DLLs
    ikvm.runtime.Startup.addBootClassPathAssembly(
        System.Reflection.Assembly.GetAssembly(
            typeof(org.slf4j.impl.StaticLoggerBinder)));

    // Configure to find docx4j.properties
    // .. add as URL the dir containing docx4j.properties (not the file itself!)
    Plutext.PropertiesConfigurator.setDocx4jPropertiesDir(projectDir + @"src\samples\resources\");

    // Load input_template.docx
    WordprocessingMLPackage wordMLPackage = org.docx4j.Docx4J.load(new java.io.File(input_DOCX));

    // Open the xml stream
    java.io.FileInputStream xmlStream = new java.io.FileInputStream(new java.io.File(input_XML));

    // Do the binding:
    // FLAG_NONE means that all the steps of the binding will be done,
    // otherwise you could pass a combination of the following flags:
    // FLAG_BIND_INSERT_XML: inject the passed XML into the document
    // FLAG_BIND_BIND_XML: bind the document and the xml (including any OpenDope handling)
    // FLAG_BIND_REMOVE_SDT: remove the content controls from the document (only the content remains)
    // FLAG_BIND_REMOVE_XML: remove the custom xml parts from the document
    //Docx4J.bind(wordMLPackage, xmlStream, Docx4J.FLAG_NONE);

    // If a document doesn't include the OpenDope definitions, e.g. the XPathPart,
    // then the only thing you can do is insert the xml;
    // the example document binding-simple.docx doesn't have an XPathPart...
    Docx4J.bind(wordMLPackage, xmlStream, Docx4J.FLAG_BIND_INSERT_XML & Docx4J.FLAG_BIND_BIND_XML);

    // Save the document
    Docx4J.save(wordMLPackage, new java.io.File(OUTPUT_DOCX), Docx4J.FLAG_NONE);
    log.Info("Saved: " + OUTPUT_DOCX);
}
private opennlp.tools.tokenize.TokenizerME prepareTokenizer()
{
    java.io.FileInputStream tokenInputStream = new java.io.FileInputStream(tokenModelPath);                          // load the token model into a stream
    opennlp.tools.tokenize.TokenizerModel tokenModel = new opennlp.tools.tokenize.TokenizerModel(tokenInputStream);  // load the token model
    return new opennlp.tools.tokenize.TokenizerME(tokenModel);                                                       // create the tokenizer
}
/*
/**********************************************************
/* Parser factories (new ones, as per [Issue-25])
/**********************************************************
 */

/// <summary>
/// Method for constructing JSON parser instance to parse
/// contents of specified file.
/// </summary>
/// <remarks>
/// Method for constructing JSON parser instance to parse
/// contents of specified file. Encoding is auto-detected
/// from contents according to JSON specification recommended
/// mechanism.
/// <p>
/// Underlying input stream (needed for reading contents)
/// will be <b>owned</b> (and managed, i.e. closed as need be) by
/// the parser, since caller has no access to it.
/// </remarks>
/// <param name="f">File that contains JSON content to parse</param>
/// <since>2.1</since>
/// <exception cref="System.IO.IOException"/>
/// <exception cref="com.fasterxml.jackson.core.JsonParseException"/>
public virtual com.fasterxml.jackson.core.JsonParser createParser(Sharpen.FilePath f)
{
    // true, since we create the InputStream from a File
    com.fasterxml.jackson.core.io.IOContext ctxt = _createContext(f, true);
    Sharpen.InputStream @in = new java.io.FileInputStream(f);
    return _createParser(_decorate(@in, ctxt), ctxt);
}
private bool AddToZip(java.io.FileOutputStream fos, java.util.zip.ZipOutputStream zos, string sourceFile, string destName)
{
    try
    {
        Thread.Sleep(6000);
        java.io.FileInputStream fis = new java.io.FileInputStream(sourceFile);
        java.util.zip.ZipEntry ze = new java.util.zip.ZipEntry(destName);
        zos.putNextEntry(ze);
        byte[] buffer = new byte[1024];
        int len;
        while ((len = fis.read(buffer)) >= 0)
        {
            zos.write(buffer, 0, len);
        }
        zos.closeEntry();
        fis.close();
    }
    catch (Exception)
    {
        File.AppendAllText("log_save.txt",
            DateTime.Now.ToShortDateString() + " " + DateTime.Now.ToShortTimeString() + ": " + sourceFile + Environment.NewLine);
        AddToZip(fos, zos, sourceFile, destName);
    }
    return true;
}
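// A calling sketch, not from the original source: AddToZip expects the caller to own both output streams,
// so a hypothetical SaveToZip helper would create them, add each file, and close them afterwards.
private void SaveToZip(string zipPath, string[] sourceFiles)
{
    java.io.FileOutputStream fos = new java.io.FileOutputStream(zipPath);
    java.util.zip.ZipOutputStream zos = new java.util.zip.ZipOutputStream(fos);
    foreach (string sourceFile in sourceFiles)
    {
        // Store each file under its bare name inside the archive.
        AddToZip(fos, zos, sourceFile, Path.GetFileName(sourceFile));
    }
    zos.close(); // closing the ZipOutputStream finalizes the archive and closes the underlying stream
    fos.close();
}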