Example #1
        static void ExportPlantsTableToMediawikiAPI()
        {
            string url=Globals.Options.GetElementAsString("xml.Options.Mediawiki.URL");
            string username=Globals.Options.GetElementAsString("xml.Options.Mediawiki.Username");
            string password=Globals.Options.GetElementAsString("xml.Options.Mediawiki.Passwort");

            Site wiki=new Site(url, username, password);

            Page page=new Page(wiki, "Liste der Pflanzen");
            page.Load();

            string plantList=GetPlantAsMediaWiki();

            //Determine the currently listed plants between the two anchors
            string text=page.text;
            string start="{{Anker|AutomaticStartPlantList}}";
            string end="{{Anker|AutomaticEndPlantList}}";
            int idxBeginList=text.IndexOf(start, 0);
            int idxEndList=text.IndexOf(end, 0);

            int lengthOfString=idxEndList-(idxBeginList+start.Length);

            string oldListContent=text.Substring(idxBeginList+start.Length, lengthOfString);
            if(oldListContent!="\n")
            {
                //Remove the old list so that only the anchors remain
                text=text.Replace(oldListContent, "");
            }

            //Insert the freshly generated list directly after the start anchor
            string replaceString="{{Anker|AutomaticStartPlantList}}\n"+plantList;
            text=text.Replace(start, replaceString);

            if(page.text!=text)
            {
                page.text=text;
                page.Save("Bot: Liste der Pflanzen aktualisiert.", true);
            }
        }
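
Example #1 and the Lua updater in Example #2 below duplicate the same anchor-handling code. A shared helper could remove that duplication; the following is only a sketch under that assumption (ReplaceBetweenAnchors is a hypothetical name, not part of the original tool):

        /// <summary>Replaces everything between two {{Anker|...}} markers with new content.
        /// Returns the text unchanged if one of the markers is missing.</summary>
        static string ReplaceBetweenAnchors(string text, string startAnchor, string endAnchor, string newContent)
        {
            int idxStart=text.IndexOf(startAnchor);
            int idxEnd=text.IndexOf(endAnchor);
            if(idxStart==-1||idxEnd==-1||idxEnd<idxStart) return text;

            //Keep both anchors and swap only the content between them
            return text.Substring(0, idxStart+startAnchor.Length)+"\n"+newContent+text.Substring(idxEnd);
        }

With such a helper, the anchor logic in ExportPlantsTableToMediawikiAPI would shrink to a single call between Load() and Save().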
Example #2
        static void UpdateLuaInMediaWiki()
        {
            if(Globals.folder_root=="")
            {
                Console.WriteLine("Bitte geben sie in den Optionen den Pfad zum Invertika Repository an.");
                return;
            }

            if(Globals.Options.GetElementAsString("xml.Options.Mediawiki.URL")=="")
            {
                Console.WriteLine("Bitte geben sie eine Mediawiki URL in den Optionen an.");
                return;
            }

            if(Globals.Options.GetElementAsString("xml.Options.Mediawiki.Username")=="")
            {
                Console.WriteLine("Bitte geben sie einen Mediawiki Nutzernamen in den Optionen an.");
                return;
            }

            if(Globals.Options.GetElementAsString("xml.Options.Mediawiki.Passwort")=="")
            {
                Console.WriteLine("Bitte geben sie ein Mediawiki Passwort in den Optionen an.");
                return;
            }

            string url=Globals.Options.GetElementAsString("xml.Options.Mediawiki.URL");
            string username=Globals.Options.GetElementAsString("xml.Options.Mediawiki.Username");
            string password=Globals.Options.GetElementAsString("xml.Options.Mediawiki.Passwort");

            Site wiki=new Site(url, username, password);

            List<string> luafiles=FileSystem.GetFiles(Globals.folder_data_scripts_libs, true, "*.lua");

            foreach(string file in luafiles)
            {
                LuaDocParser ldp=new LuaDocParser(file);
                LucDocReturn ret=ldp.ExportLuaDocToMediaWiki();

                switch(ret.DocType)
                {
                    case LuaDocType.Module:
                        {
                            Page page=new Page(wiki, ret.Name+" (Lua Modul)");

                            page.Load();

                            string text=page.text;

                            if(text=="")
                            {
                                List<string> lines=new List<string>();

                                lines.Add("{{Status_Green}}");
                                lines.Add("{{Automatic}}");

                                lines.Add("");

                                lines.Add("==Funktionen==");

                                //Functions
                                lines.Add("{{Anker|AutomaticStartFunctions}}");
                                lines.AddRange(ret.Functions);
                                lines.Add("{{Anker|AutomaticEndFunctions}}");
                                lines.Add("");
                                lines.Add("[[Kategorie: Lua]]");
                                lines.Add("[[Kategorie: Lua Modul]]");

                                foreach(string ll in lines)
                                {
                                    text+=ll+"\n";
                                }

                                if(page.text!=text)
                                {
                                    page.text=text;
                                    page.Save("Sourcecode Dokumentation erstellt.", false);
                                }
                            }
                            else //Replace the corresponding sections
                            {
                                string start="{{Anker|AutomaticStartFunctions}}";
                                string end="{{Anker|AutomaticEndFunctions}}";
                                int idxBeginFunctions=text.IndexOf(start, 0);
                                int idxEndFunctions=text.IndexOf(end, 0);

                                int lengthOfString=idxEndFunctions-(idxBeginFunctions+start.Length);
                                string oldFunctionList=text.Substring(idxBeginFunctions+start.Length, lengthOfString);

                                if(oldFunctionList!="\n")
                                {
                                    //Remove the previously generated function list
                                    text=text.Replace(oldFunctionList, "");
                                }

                                //Re-insert the freshly generated function list after the start anchor
                                string replaceString="{{Anker|AutomaticStartFunctions}}\n";

                                foreach(string ll in ret.Functions)
                                {
                                    replaceString+=ll+"\n";
                                }

                                text=text.Replace(start, replaceString);

                                if(page.text!=text)
                                {
                                    page.text=text;
                                    page.Save("Sourcecode Dokumentation aktualisiert.", true);
                                }
                            }
                            break;
                        }
                    default:
                        {
                            break;
                        }
                }

            }

            Console.WriteLine("Lua Dokumentation aktualisiert.");
        }
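
The four option checks at the beginning of UpdateLuaInMediaWiki repeat the same pattern. They could be collapsed with a small helper; this is only a sketch, and RequireOption is a hypothetical name that does not exist in the original code:

        /// <summary>Prints the given prompt and returns false if the option is empty.</summary>
        static bool RequireOption(string optionPath, string prompt)
        {
            if(Globals.Options.GetElementAsString(optionPath)=="")
            {
                Console.WriteLine(prompt);
                return false;
            }

            return true;
        }

        //Hypothetical usage inside UpdateLuaInMediaWiki:
        //if(!RequireOption("xml.Options.Mediawiki.URL", "Bitte geben sie eine Mediawiki URL in den Optionen an.")) return;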
Example #3
 /// <summary>Gets page titles and page text from a local XML dump.
 /// This function is resource-intensive.</summary>
 /// <param name="filePathName">The path to and name of the XML dump file as string.</param>
 public void FillAndLoadFromXmlDump(string filePathName)
 {
     Console.WriteLine(Bot.Msg("Loading pages from XML dump..."));
     XmlReader reader = XmlReader.Create(filePathName);
     while (reader.ReadToFollowing("page")) {
         Page p = new Page(site);
         p.ParsePageXml(reader.ReadOuterXml());
         pages.Add(p);
     }
     reader.Close();
     Console.WriteLine(Bot.Msg("XML dump loaded successfully."));
 }
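
A minimal usage sketch for this method, assuming the Site and PageList constructors shown in the other snippets are public; the URL, credentials and dump file name are placeholders:

 // Hypothetical usage; URL, credentials and dump file name are placeholders.
 Site site = new Site("https://example.org/wiki", "BotUser", "secret");
 PageList pageList = new PageList(site);
 pageList.FillAndLoadFromXmlDump("dump.xml");
 Console.WriteLine(pageList[0].title);    // pageList[i] indexer as mentioned in the SetPageAtIndex example below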
Example #4
 /// <summary>Gets page titles and page texts from all ".txt" files in the specified
 /// directory (folder). Each file becomes a page. Page titles are constructed from
 /// file names. Page text is read from file contents. If any Unicode numeric codes
 /// (also known as numeric character references or NCRs) of the forbidden characters
 /// (forbidden in filenames) are recognized in filenames, those codes are converted
 /// to characters (e.g. "&#x7c;" is converted to "|").</summary>
 /// <param name="dirPath">The path and name of a directory (folder)
 /// to load files from.</param>
 public void FillAndLoadFromFiles(string dirPath)
 {
     foreach (string fileName in Directory.GetFiles(dirPath, "*.txt")) {
         Page p = new Page(site, Path.GetFileNameWithoutExtension(fileName));
         p.title = p.title.Replace("&#x22;", "\"");
         p.title = p.title.Replace("&#x3c;", "<");
         p.title = p.title.Replace("&#x3e;", ">");
         p.title = p.title.Replace("&#x3f;", "?");
         p.title = p.title.Replace("&#x3a;", ":");
         p.title = p.title.Replace("&#x5c;", "\\");
         p.title = p.title.Replace("&#x2f;", "/");
         p.title = p.title.Replace("&#x2a;", "*");
         p.title = p.title.Replace("&#x7c;", "|");
         p.LoadFromFile(fileName);
         pages.Add(p);
     }
 }
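
The chain of Replace() calls encodes a fixed table of numeric character references for characters that are forbidden in file names. An equivalent, more compact formulation is sketched below (DecodeForbiddenChars is a hypothetical helper, not part of the library, and requires System.Collections.Generic):

 // Sketch: the same NCR-to-character table expressed as a dictionary.
 static readonly Dictionary<string, string> forbiddenCharNcrs = new Dictionary<string, string>
 {
     { "&#x22;", "\"" }, { "&#x3c;", "<" }, { "&#x3e;", ">" },
     { "&#x3f;", "?" }, { "&#x3a;", ":" }, { "&#x5c;", "\\" },
     { "&#x2f;", "/" }, { "&#x2a;", "*" }, { "&#x7c;", "|" }
 };

 static string DecodeForbiddenChars(string title)
 {
     foreach (KeyValuePair<string, string> pair in forbiddenCharNcrs)
         title = title.Replace(pair.Key, pair.Value);
     return title;
 }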
Example #5
 /// <summary>This internal function compares pages by titles (alphabetically).</summary>
 /// <returns>Returns 1 if x is greater, -1 if y is greater, 0 if equal.</returns>
 public int ComparePagesByTitles(Page x, Page y)
 {
     int r = string.Compare(x.title, y.title, false, site.langCulture);
     return Math.Sign(r);
 }
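
Because ComparePagesByTitles matches the Comparison&lt;Page&gt; delegate, it can be passed directly to List&lt;Page&gt;.Sort(). A one-line sketch, assuming it is called from inside the class where the internal pages list is visible:

 pages.Sort(ComparePagesByTitles);    // sorts the list alphabetically using site.langCulture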
Example #6
        /// <summary>Loads texts and metadata (revision ID, timestamp, last comment,
        /// last contributor, minor edit mark) for pages in this PageList.
        /// Non-existent pages will be automatically removed from the PageList.
        /// Please don't use this function when you are going to edit a large number of pages
        /// on popular public wikis, as it compromises edit conflict detection. In that case,
        /// each page's text should be loaded individually right before it is processed
        /// and saved.</summary>
        public void LoadWithMetadata()
        {
            if (IsEmpty())
                throw new WikiBotException(Bot.Msg("The PageList is empty. Nothing to load."));
            Console.WriteLine(Bot.Msg("Loading {0} pages..."), pages.Count);

            string res = site.indexPath + "?title=Special:Export&action=submit";
            string postData = "curonly=True&pages=";
            foreach (Page page in pages)
                postData += HttpUtility.UrlEncode(page.title) + "\r\n";
            string src = site.PostDataAndGetResult(res, postData);
            XmlReader reader = XmlReader.Create(new StringReader(src));
            PageList pl = new PageList(site);
            while (reader.ReadToFollowing("page")) {
                Page p = new Page(site);
                p.ParsePageXml(reader.ReadOuterXml());
                pl.Add(p);
            }
            reader.Close();
            if (pl.pages.Count > 0) {    // the Special:Export XML was parsed successfully
                Clear();
                pages = pl.pages;
                return;
            }
            else {    // FALLBACK: use alternative parsing method, XPath
                Console.WriteLine(
                    Bot.Msg("XML parsing failed, switching to alternative parser..."));
                src = Bot.RemoveXMLRootAttributes(src);
                StringReader strReader = new StringReader(src);
                XPathDocument doc = new XPathDocument(strReader);
                strReader.Close();
                XPathNavigator nav = doc.CreateNavigator();
                foreach (Page page in pages) {
                    if (page.title.Contains("'")) {    // There's no good way to escape "'" in XPath
                        page.LoadWithMetadata();
                        continue;
                    }
                    string query = "//page[title='" + page.title + "']/";
                    try {
                        page.text =
                            nav.SelectSingleNode(query + "revision/text").InnerXml;
                    }
                    catch (System.NullReferenceException) {
                        continue;
                    }
                    page.text = HttpUtility.HtmlDecode(page.text);
                    page.pageId = nav.SelectSingleNode(query + "id").InnerXml;
                    try {
                        page.lastUser = nav.SelectSingleNode(query +
                            "revision/contributor/username").InnerXml;
                        page.lastUserId = nav.SelectSingleNode(query +
                            "revision/contributor/id").InnerXml;
                    }
                    catch (System.NullReferenceException) {
                        page.lastUser = nav.SelectSingleNode(query +
                            "revision/contributor/ip").InnerXml;
                    }
                    page.lastUser = HttpUtility.HtmlDecode(page.lastUser);
                    page.revision = nav.SelectSingleNode(query + "revision/id").InnerXml;
                    page.lastMinorEdit = (nav.SelectSingleNode(query +
                        "revision/minor") == null) ? false : true;
                    try {
                        page.comment = nav.SelectSingleNode(query + "revision/comment").InnerXml;
                        page.comment = HttpUtility.HtmlDecode(page.comment);
                    }
                    catch (System.NullReferenceException) { }    // the comment element is optional
                    page.timestamp =
                        nav.SelectSingleNode(query + "revision/timestamp").ValueAsDateTime;
                }

                if (string.IsNullOrEmpty(pages[0].text)) {    // FALLBACK 2, load pages one-by-one
                    foreach (Page page in pages)
                        page.LoadWithMetadata();
                }
            }
        }
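
A minimal usage sketch (URL, credentials and page title are placeholders; FillFromPageLinks is shown in the next example):

        // Hypothetical usage; URL, credentials and page title are placeholders.
        Site site = new Site("https://example.org/wiki", "BotUser", "secret");
        PageList pageList = new PageList(site);
        pageList.FillFromPageLinks("Main Page");
        pageList.LoadWithMetadata();    // texts plus revision metadata in one Special:Export request
        Console.WriteLine(pageList[0].lastUser + " last edited " + pageList[0].title);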
Example #7
 /// <summary>Gets page titles for this PageList from links in some wiki page. All links
 /// are retrieved, from all namespaces, except interwiki links to other
 /// sites. Use the FilterNamespaces() or RemoveNamespaces() function to remove pages from
 /// unwanted namespaces (categories, images, etc.).</summary>
 /// <param name="pageTitle">Page title as string.</param>
 /// <example><code>pageList.FillFromPageLinks("Art");</code></example>
 public void FillFromPageLinks(string pageTitle)
 {
     if (string.IsNullOrEmpty(pageTitle))
         throw new ArgumentNullException("pageTitle");
     Regex wikiLinkRegex = new Regex(@"\[\[ *:*(.+?)(]]|\|)");
     Page page = new Page(site, pageTitle);
     page.Load();
     MatchCollection matches = wikiLinkRegex.Matches(page.text);
     Regex outWikiLink = new Regex("^(" + site.generalData["interwiki"] + "):");
     foreach (Match match in matches)
         if (!outWikiLink.IsMatch(match.Groups[1].Value))
             pages.Add(new Page(site, match.Groups[1].Value));
     Console.WriteLine(
         Bot.Msg("PageList filled with links, found on \"{0}\" page."), pageTitle);
 }
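
To make the link extraction concrete, here is a tiny self-contained sketch of what the regex captures; the sample wikitext is invented for illustration and System.Text.RegularExpressions is assumed to be imported:

 // Sketch: what the wiki-link regex extracts from sample wikitext.
 Regex wikiLinkRegex = new Regex(@"\[\[ *:*(.+?)(]]|\|)");
 string sample = "See [[Art]] and [[:Category:Painters|painters]] and [[wikt:art]].";
 foreach (Match m in wikiLinkRegex.Matches(sample))
     Console.WriteLine(m.Groups[1].Value);    // "Art", "Category:Painters", "wikt:art"
 // In FillFromPageLinks, captures whose prefix is a registered interwiki prefix are then dropped.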
Example #8
 /// <summary>Gets page titles for this PageList from Google search results.
 /// The function gets pages from all namespaces and does not clear
 /// the existing PageList, so new pages are appended.</summary>
 /// <param name="searchStr">Words to search for. Use quotes to find exact phrases.</param>
 /// <param name="limit">Maximum number of page titles to get.</param>
 public void FillFromGoogleSearchResults(string searchStr, int limit)
 {
     if (string.IsNullOrEmpty(searchStr))
         throw new ArgumentNullException("searchStr");
     if (limit <= 0)
         throw new ArgumentOutOfRangeException("limit", Bot.Msg("Limit must be positive."));
     // TO DO: paging
     Uri res = new Uri("http://www.google.com/search?q=" + HttpUtility.UrlEncode(searchStr) +
         "+site:" + site.address.Substring(site.address.IndexOf("://") + 3) +
         "&num=" + limit.ToString());
     string src = Bot.GetWebResource(res, "");
     string relativeIndexPath = site.indexPath.Substring(site.indexPath.IndexOf('/', 10));
     string googleLinkToPagePattern = "<h3[^>]*><a href=\"(?<double_escape>/url\\?q=)?" +
         Regex.Escape(site.address).Replace("https:", "https?:") + "(?:" +
         (!string.IsNullOrEmpty(site.shortPath) ?
             Regex.Escape(site.shortPath) + "|" : "") +
         Regex.Escape(relativeIndexPath) + "\\?title=)?" + "(?<title>[^&\"]+)";
     Regex GoogleLinkToPageRegex = new Regex(googleLinkToPagePattern);
     MatchCollection matches = GoogleLinkToPageRegex.Matches(src);
     foreach (Match match in matches) {
         string title = HttpUtility.UrlDecode(match.Groups["title"].Value);
         if (title == "/") {
             if (site.messages == null)
                 site.LoadMediawikiMessages(true);
             string mainPageTitle = site.messages["mainpage"];
             Page p = new Page(site, mainPageTitle);
             p.ResolveRedirect();
             pages.Add(p);
         }
         else {
             if (!string.IsNullOrEmpty(match.Groups["double_escape"].Value))
                 title = HttpUtility.UrlDecode(title);
             pages.Add(new Page(site, title));
         }
     }
     Console.WriteLine(Bot.Msg("PageList filled with www.google.com search results."));
 }
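
A minimal usage sketch (URL, credentials and search phrase are placeholders). Note that the method scrapes Google's HTML result page with a regular expression, so it is likely to break whenever that markup changes:

 // Hypothetical usage; URL, credentials and search phrase are placeholders.
 Site site = new Site("https://example.org/wiki", "BotUser", "secret");
 PageList pageList = new PageList(site);
 pageList.FillFromGoogleSearchResults("\"ancient pottery\"", 20);    // at most 20 titles
 pageList.LoadWithMetadata();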
Example #9
        /// <summary>Gets the page history and fills this PageList with the specified number of
        /// recent page revisions. Only revision identifiers, user names, timestamps and comments
        /// are loaded, not the texts. Call Load() to load the texts of page revisions.</summary>
        /// <param name="pageTitle">Page to get history of.</param>
        /// <param name="limit">Number of last page revisions to get.</param>
        public void FillFromPageHistory(string pageTitle, int limit)
        {
            if (string.IsNullOrEmpty(pageTitle))
                throw new ArgumentNullException("pageTitle");
            if (limit <= 0)
                throw new ArgumentOutOfRangeException("limit",
                    Bot.Msg("Quantity must be positive."));
            Console.WriteLine(
                Bot.Msg("Getting {0} last revisions of \"{1}\" page..."), limit, pageTitle);

            if (site.useApi) {
                string queryUri = site.apiPath + "?action=query&prop=revisions&titles=" +
                    HttpUtility.UrlEncode(pageTitle) + "&rvprop=ids|user|comment|timestamp" +
                    "&format=xml&rvlimit=" + limit.ToString();
                string src = site.GetWebPage(queryUri);
                Page p;
                using (XmlReader reader = XmlReader.Create(new StringReader(src))) {
                    reader.ReadToFollowing("api");
                    reader.Read();
                    if (reader.Name == "error")
                        Console.Error.WriteLine(Bot.Msg("Error: {0}"), reader.GetAttribute("info"));
                    while (reader.ReadToFollowing("rev")) {
                        p = new Page(site, pageTitle);
                        p.revision = reader.GetAttribute("revid");
                        p.lastUser = reader.GetAttribute("user");
                        p.comment = reader.GetAttribute("comment");
                        p.timestamp =
                            DateTime.Parse(reader.GetAttribute("timestamp")).ToUniversalTime();
                        pages.Add(p);
                    }
                }
            }
            else {
                // TO DO: paging
                string res = site.indexPath + "?title=" +
                    HttpUtility.UrlEncode(pageTitle) + "&limit=" + limit.ToString() +
                    "&action=history";
                string src = site.GetWebPage(res);
                src = src.Substring(src.IndexOf("<ul id=\"pagehistory\">"));
                src = src.Substring(0, src.IndexOf("</ul>") + 5);
                Page p = null;
                using (XmlReader reader = Bot.GetXMLReader(src)) {
                    while (reader.Read()) {
                        if (reader.Name == "li" && reader.NodeType == XmlNodeType.Element) {
                            p = new Page(site, pageTitle);
                            p.lastMinorEdit = false;
                            p.comment = "";
                        }
                        else if (reader.Name == "span"
                            && reader["class"] == "mw-history-histlinks") {
                            reader.ReadToFollowing("a");
                            p.revision = reader["href"].Substring(
                                reader["href"].IndexOf("oldid=") + 6);
                            DateTime.TryParse(reader.ReadString(),
                                site.regCulture, DateTimeStyles.AssumeLocal, out p.timestamp);
                        }
                        else if (reader.Name == "span" && reader["class"] == "history-user") {
                            reader.ReadToFollowing("a");
                            p.lastUser = reader.ReadString();
                        }
                        else if (reader.Name == "abbr")
                            p.lastMinorEdit = true;
                        else if (reader.Name == "span" && reader["class"] == "history-size")
                            int.TryParse(Regex.Replace(reader.ReadString(), @"[^-+\d]", ""),
                                out p.lastBytesModified);
                        else if (reader.Name == "span" && reader["class"] == "comment") {
                            p.comment = Regex.Replace(reader.ReadInnerXml().Trim(), "<.+?>", "");
                            p.comment = p.comment.Substring(1, p.comment.Length - 2);    // brackets
                        }
                        if (reader.Name == "li" && reader.NodeType == XmlNodeType.EndElement)
                            pages.Add(p);
                    }
                }
            }

            Console.WriteLine(Bot.Msg("PageList filled with {0} last revisions of \"{1}\" page..."),
                pages.Count, pageTitle);
        }
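
A minimal usage sketch (URL, credentials and page title are placeholders). Only revision metadata is filled; the texts would still have to be loaded separately, as the summary notes:

        // Hypothetical usage; URL, credentials and page title are placeholders.
        Site site = new Site("https://example.org/wiki", "BotUser", "secret");
        PageList history = new PageList(site);
        history.FillFromPageHistory("Main Page", 10);    // the 10 most recent revisions
        Console.WriteLine(history[0].lastUser + " @ " + history[0].timestamp + ": " + history[0].comment);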
Example #10
 /// <summary>Returns true if this PageList contains a page with the same title and
 /// the same revision ID as the page passed as a parameter. Before the comparison this
 /// function corrects all namespace prefixes in this PageList and in the title of the
 /// Page passed as a parameter.</summary>
 /// <param name="page">Page object to search for in this PageList.</param>
 /// <returns>Returns true if such a page was found, false otherwise.</returns>
 public bool Contains(Page page)
 {
     page.CorrectNsPrefix();
     CorrectNsPrefixes();
     foreach (Page p in pages)
         if (p.title == page.title && p.revision == page.revision)
             return true;
     return false;
 }
Example #11
 /// <summary>Inserts an element into this PageList at the specified index.</summary>
 /// <param name="page">Page object to insert.</param>
 /// <param name="index">Zero-based index.</param>
 public void Insert(Page page, int index)
 {
     pages.Insert(index, page);
 }
Example #12
 /// <summary>This function adds the specified page to the end of this PageList.</summary>
 /// <param name="page">Page object to add.</param>
 public void Add(Page page)
 {
     pages.Add(page);
 }
Example #13
 /// <summary>This function sets an individual page in this PageList at the specified index.
 /// It's better to use the simple pageList[i] indexer when possible.</summary>
 /// <param name="page">Page object to set in this PageList.</param>
 /// <param name="index">Zero-based index.</param>
 public void SetPageAtIndex(Page page, int index)
 {
     pages[index] = page;
 }
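
The small mutator methods above behave like thin wrappers around the internal List<Page>. A combined usage sketch (Site, Page and the pageList[i] indexer as shown in the earlier snippets; URL and credentials are placeholders):

 // Hypothetical usage of the list-style helpers.
 Site site = new Site("https://example.org/wiki", "BotUser", "secret");
 PageList pageList = new PageList(site);
 pageList.Add(new Page(site, "Sandbox"));                         // append to the end
 pageList.Insert(new Page(site, "Main Page"), 0);                 // insert at the front
 pageList.SetPageAtIndex(new Page(site, "Help:Contents"), 1);     // overwrite index 1
 bool known = pageList.Contains(new Page(site, "Main Page"));     // matches on title and revision
 Console.WriteLine(known ? pageList[0].title : "not found");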