/*///<summary>Update may be implemented when versioning is improved.</summary>
public static void Update(WikiPage wikiPage){
	Insert(wikiPage);
	//if(RemotingClient.RemotingRole==RemotingRole.ClientWeb){
	//	Meth.GetVoid(MethodBase.GetCurrentMethod(),wikiPage);
	//	return;
	//}
	//Crud.WikiPageCrud.Update(wikiPage);
}*/

///<summary>Surround with try/catch. Returns the root wiki directory, typically something similar to \\SERVER\OpenDentImages\Wiki.
///Throws ApplicationException when images are stored in the database, since the wiki requires AtoZ folders.
///For local AtoZ storage the directory is created on first use if it does not already exist.</summary>
public static string GetWikiPath() {
	//No need to check RemotingRole; no call to db.
	if (PrefC.AtoZfolderUsed == DataStorageType.InDatabase) {
		//Wiki content (images/files) lives under the AtoZ share; there is no equivalent location for in-database storage.
		throw new ApplicationException(Lans.g("WikiPages", "Must be using AtoZ folders."));
	}
	string wikiPath = CloudStorage.PathTidy(Path.Combine(ImageStore.GetPreferredAtoZpath(), "Wiki"));
	//Only local AtoZ folders can be created from here; cloud storage paths are managed remotely.
	bool needsCreate = PrefC.AtoZfolderUsed == DataStorageType.LocalAtoZ && !Directory.Exists(wikiPath);
	if (needsCreate) {
		Directory.CreateDirectory(wikiPath);
	}
	return wikiPath;
}
///<summary>Surround with try/catch: throws ApplicationException for malformed markup (invalid xml, unknown tags,
///self-terminating tags, or missing end tags). Translates wiki markup into an XHTML page body and aggregates the content
///into the master page (unless canAggregate is false; email only). If isPreviewOnly, the internal links will not be checked
///to see if the page exists, as the extra query would make the refresh sluggish; isPreviewOnly also changes the pointer so
///that the page looks non-clickable. hasWikiPageTitles indicates whether [[...]] internal links carry page titles (true)
///or WikiPageNums (false). For emails, this only gets called while in the email edit window. The returned string will be
///used to switch between plain and html text.</summary>
public static string TranslateToXhtml(string markupText, bool isPreviewOnly, bool hasWikiPageTitles = false, bool isEmail = false, bool canAggregate = true) {
	//No need to check RemotingRole; no call to db.
	#region Basic Xml Validation
	string s = markupText;
	MatchCollection matches;
	//"<",">", and "&"-----------------------------------------------------------------------------------------------------------
	//NOTE(review): the three Replace calls below are no-ops as written (search and replacement strings are identical).
	//They almost certainly escaped "&" to its xml entity and then repaired pre-escaped "<"/">" entity sequences, but the
	//entity literals appear to have been decoded away when this file was exported. Confirm against source control.
	s = s.Replace("&", "&");
	s = s.Replace("&<", "<"); //because "&" was changed to "&" in the line above.
	s = s.Replace("&>", ">"); //because "&" was changed to "&" in the line above.
	s = "<body>" + s + "</body>";
	//This load is purely a validation pass; XmlDocument throws here if the markup is not well-formed xml.
	XmlDocument doc = new XmlDocument();
	using (StringReader reader = new StringReader(s)) {
		doc.Load(reader);
	}
	#endregion
	#region regex replacements
	if (isEmail) {
		s = TranslateEmailImages(s); //handle email images and wiki images separately.
	}
	else {
		//[[img:myimage.gif]]------------------------------------------------------------------------------------------------------------
		matches = Regex.Matches(s, _odWikiImage);
		foreach (Match match in matches) {
			//Everything after the first ":" and before the trailing "]]" is the image file name.
			string imgName = match.Value.Substring(match.Value.IndexOf(":") + 1).TrimEnd("]".ToCharArray());
			string wikiPath = "";
			try {
				wikiPath = WikiPages.GetWikiPath();
			}
			catch (Exception ex) {
				ex.DoNothing();
				throw; //rethrow so the caller's surrounding try/catch (see summary) reports the AtoZ configuration problem.
			}
			string fullPath = FileAtoZ.CombinePaths(wikiPath, POut.String(imgName));
			if (CloudStorage.IsCloudStorage) {
				//WebBrowser needs to have a local file to open, so we download the images to temp files.
				OpenDentalCloud.Core.TaskStateDownload state = CloudStorage.Download(Path.GetDirectoryName(fullPath), Path.GetFileName(fullPath));
				string tempFile = PrefC.GetRandomTempFile(Path.GetExtension(fullPath));
				File.WriteAllBytes(tempFile, state.FileContent);
				fullPath = tempFile;
			}
			s = s.Replace(match.Value, "<img src=\"file:///" + fullPath.Replace("\\", "/") + "\"></img>");
		}
		//[[keywords: key1, key2, etc.]]------------------------------------------------------------------------------------------------
		matches = Regex.Matches(s, _odWikiKeyword);
		foreach (Match match in matches) //should be only one
		{
			//Substring(11) skips the "[[keywords:" prefix.
			s = s.Replace(match.Value, "<span class=\"keywords\">keywords:" + match.Value.Substring(11).TrimEnd("]".ToCharArray()) + "</span>");
		}
		//[[file:C:\eaula.txt]]------------------------------------------------------------------------------------------------
		matches = Regex.Matches(s, _odWikiFile);
		foreach (Match match in matches) {
			string fileName = match.Value.Replace("[[file:", "").TrimEnd(']');
			s = s.Replace(match.Value, "<a href=\"wikifile:" + fileName + "\">file:" + fileName + "</a>");
		}
		//[[folder:\\serverfiles\storage\]]------------------------------------------------------------------------------------------------
		matches = Regex.Matches(s, _odWikiFolder);
		foreach (Match match in matches) {
			string folderName = match.Value.Replace("[[folder:", "").TrimEnd(']');
			s = s.Replace(match.Value, "<a href=\"folder:" + folderName + "\">folder:" + folderName + "</a>");
		}
		//[[filecloud:AtoZ/SheetImages/happyclown.jpg]]------------------------------------------------------------------------------------------------
		matches = Regex.Matches(s, _odWikiFilecloud);
		foreach (Match match in matches) {
			string fileName = CloudStorage.PathTidy(match.Value.Replace("[[filecloud:", "").TrimEnd(']'));
			s = s.Replace(match.Value, "<a href=\"wikifilecloud:" + fileName + "\">filecloud:" + fileName + "</a>");
		}
		//[[foldercloud:AtoZ/PenguinPictures/]]------------------------------------------------------------------------------------------------
		matches = Regex.Matches(s, _odWikiFoldercloud);
		foreach (Match match in matches) {
			string folderName = CloudStorage.PathTidy(match.Value.Replace("[[foldercloud:", "").TrimEnd(']'));
			s = s.Replace(match.Value, "<a href=\"foldercloud:" + folderName + "\">foldercloud:" + folderName + "</a>");
		}
	}
	//Color and text are for both wiki and email. It's important we do this before Internal Link or else the translation may not work.
	//[[color:red|text]]----------------------------------------------------------------------------------------------------------------
	matches = Regex.Matches(s, _odWikiColor); //.*? matches as few as possible.
	foreach (Match match in matches) {
		//string[] paragraphs = match.Value.Split(new string[] { "\n" },StringSplitOptions.None);
		string tempText = "<span style=\"color:";
		string[] tokens = match.Value.Split('|');
		if (tokens.Length < 2) //not enough tokens; leave the raw markup untranslated.
		{
			continue;
		}
		if (tokens[0].Split(':').Length != 2) //Must have a color token and a color value seperated by a colon, no more no less.
		{
			continue;
		}
		for (int i = 0; i < tokens.Length; i++) {
			if (i == 0) {
				tempText += tokens[0].Split(':')[1] + ";\">"; //close <span> tag
				continue;
			}
			//Re-join the remaining tokens with "|" since the display text itself may legitimately contain pipes.
			tempText += (i > 1?"|":"") + tokens[i];
		}
		tempText = tempText.TrimEnd(']');
		tempText += "</span>";
		s = s.Replace(match.Value, tempText);
	}
	//[[font-family:courier|text]]----------------------------------------------------------------------------------------------------------------
	//Same token structure as the color markup above, but emits a font-family style instead.
	matches = Regex.Matches(s, _odWikiFont); //.*? matches as few as possible.
	foreach (Match match in matches) {
		//string[] paragraphs = match.Value.Split(new string[] { "\n" },StringSplitOptions.None);
		string tempText = "<span style=\"font-family:";
		string[] tokens = match.Value.Split('|');
		if (tokens.Length < 2) //not enough tokens
		{
			continue;
		}
		if (tokens[0].Split(':').Length != 2) //Must have a color token and a color value seperated by a colon, no more no less.
		{
			continue;
		}
		for (int i = 0; i < tokens.Length; i++) {
			if (i == 0) {
				tempText += tokens[0].Split(':')[1] + ";\">"; //close <span> tag
				continue;
			}
			tempText += (i > 1?"|":"") + tokens[i];
		}
		tempText = tempText.TrimEnd(']');
		tempText += "</span>";
		s = s.Replace(match.Value, tempText);
	}
	if (!isEmail) {
		//[[InternalLink]]--------------------------------------------------------------------------------------------------------------
		matches = Regex.Matches(s, @"\[\[.+?\]\]");
		List <string> pageNamesToCheck = new List <string>();
		List <bool> pageNamesExist = new List <bool>();
		string styleNotExists = "";
		if (hasWikiPageTitles) {
			//Links contain literal page titles, e.g. [[My Page]].
			if (!isPreviewOnly) {
				foreach (Match match in matches) {
					//The '&' was replaced with '&' above, so we change it back before looking for a wiki page with that name.
					//NOTE(review): this Replace is a no-op as written; same suspected entity-decoding export artifact as above.
					pageNamesToCheck.Add(match.Value.Trim('[', ']').Replace("&", "&"));
				}
				if (pageNamesToCheck.Count > 0) {
					pageNamesExist = WikiPages.CheckPageNamesExist(pageNamesToCheck); //this gets a list of bools for all pagenames in one shot. One query.
				}
			}
			foreach (Match match in matches) {
				styleNotExists = "";
				if (!isPreviewOnly) {
					//The '&' was replaced with '&' above, so we change it back before looking for a wiki page with that name.
					string pageName = match.Value.Trim('[', ']').Replace("&", "&");
					int idx = pageNamesToCheck.IndexOf(pageName);
					if (!pageNamesExist[idx]) {
						styleNotExists = "class='PageNotExists' "; //render as a "missing page" link.
					}
				}
				s = s.Replace(match.Value, "<a " + styleNotExists + "href=\"" + "wiki:" + match.Value.Trim('[', ']') /*.Replace(" ","_")*/ + "\">" + match.Value.Trim('[', ']') + "</a>");
			}
		}
		else {
			//Links contain WikiPageNums, e.g. [[123]]. Resolve them to titles with one query for all pages.
			List <long> listWikiPageNums = WikiPages.GetWikiPageNumsFromPageContent(s);
			List <WikiPage> listWikiPages = WikiPages.GetWikiPages(listWikiPageNums);
			int numInvalid = 1; //counter so each broken link gets a distinct placeholder title.
			foreach (Match match in matches) {
				WikiPage wp = listWikiPages.FirstOrDefault(x => x.WikiPageNum == PIn.Long(match.Value.TrimStart('[').TrimEnd(']')));
				string pageName;
				if (wp != null) {
					pageName = wp.PageTitle;
				}
				else {
					pageName = "INVALID WIKIPAGE LINK " + numInvalid++;
				}
				if (!isPreviewOnly) {
					styleNotExists = "";
					if (wp == null) {
						styleNotExists = "class='PageNotExists' ";
					}
				}
				//NOTE(review): no-op Replace chain as written; suspected entity-decoding export artifact (see top of method).
				pageName = pageName.Replace("&", "&").Replace("&<", "<").Replace("&>", ">");
				string replace = "<a " + styleNotExists + "href=\"" + "wiki:" + pageName /*.Replace(" ","_")*/ + "\">" + pageName + "</a>";
				Regex regex = new Regex(Regex.Escape(match.Value));
				//Replace the first instance of the match with the wiki page name (or unknown if not found).
				s = regex.Replace(s, replace, 1);
			}
		}
	}
	//Unordered List----------------------------------------------------------------------------------------------------------------
	//Instead of using a regex, this will hunt through the rows in sequence.
	//later nesting by running ***, then **, then *
	s = ProcessList(s, "*");
	//numbered list---------------------------------------------------------------------------------------------------------------------
	s = ProcessList(s, "#");
	//table-------------------------------------------------------------------------------------------------------------------------
	//{|
	//!Width="100"|Column Heading 1!!Width="150"|Column Heading 2!!Width=""|Column Heading 3
	//|-
	//|Cell 1||Cell 2||Cell 3
	//|-
	//|Cell A||Cell B||Cell C
	//|}
	//There are many ways to parse this. Our strategy is to do it in a way that the generated xml is never invalid.
	//As the user types, the above example will frequently be in a state of partial completeness, and the parsing should gracefully continue anyway.
	//rigorous enforcement only happens when validating during a save, not here.
	matches = Regex.Matches(s, _odWikiTable, RegexOptions.Singleline);
	foreach (Match match in matches) {
		//If there isn't a new line before the start of the table markup or after the end, the match group value will be an empty string
		//Tables must start with "'newline'{|" and end with "|}'newline'"
		string tableStrOrig = match.Value;
		StringBuilder strbTable = new StringBuilder();
		//Splitting on the table open, row separators, and table close leaves one array entry per header/data row.
		string[] lines = tableStrOrig.Split(new string[] { "{|\n", "\n|-\n", "\n|}" }, StringSplitOptions.RemoveEmptyEntries);
		strbTable.AppendLine("<table>");
		List <string> colWidths = new List <string>(); //header widths, reused for the <td> cells of every following row.
		for (int i = 0; i < lines.Length; i++) {
			if (lines[i].StartsWith("!")) //header
			{
				strbTable.AppendLine("<tr>");
				lines[i] = lines[i].Substring(1); //strips off the leading !
				string[] cells = lines[i].Split(new string[] { "!!" }, StringSplitOptions.None);
				colWidths.Clear();
				for (int c = 0; c < cells.Length; c++) {
					if (Regex.IsMatch(cells[c], @"(Width="")\d+""\|")) //e.g. Width="90"|
					{
						strbTable.Append("<th ");
						string width = cells[c].Substring(7); //90"|Column Heading 1
						width = width.Substring(0, width.IndexOf("\"")); //90
						colWidths.Add(width);
						strbTable.Append("Width=\"" + width + "\">");
						strbTable.Append(ProcessParagraph(cells[c].Substring(cells[c].IndexOf("|") + 1), false)); //surround with p tags. Allow CR in header.
						strbTable.AppendLine("</th>");
					}
					else {
						strbTable.Append("<th>");
						strbTable.Append(ProcessParagraph(cells[c], false)); //surround with p tags. Allow CR in header.
						strbTable.AppendLine("</th>");
					}
				}
				strbTable.AppendLine("</tr>");
			}
			else if (lines[i].Trim() == "|-") {
				//totally ignore these rows
			}
			else //normal row
			{
				strbTable.AppendLine("<tr>");
				lines[i] = lines[i].Substring(1); //strips off the leading |
				string[] cells = lines[i].Split(new string[] { "||" }, StringSplitOptions.None);
				for (int c = 0; c < cells.Length; c++) {
					//NOTE(review): colWidths[c] throws IndexOutOfRangeException if a data row has more cells than the header
					//declared widths for; save-time validation presumably prevents that — confirm.
					strbTable.Append("<td Width=\"" + colWidths[c] + "\">");
					strbTable.Append(ProcessParagraph(cells[c], false));
					strbTable.AppendLine("</td>");
				}
				strbTable.AppendLine("</tr>");
			}
		}
		strbTable.Append("</table>");
		s = s.Replace(tableStrOrig, strbTable.ToString());
	}
	#endregion regex replacements
	#region paragraph grouping
	StringBuilder strbSnew = new StringBuilder();
	//a paragraph is defined as all text between sibling tags, even if just a \n.
	int iScanInParagraph = 0; //scan starting at the beginning of s. S gets chopped from the start each time we grab a paragraph or a sibiling element.
	//The scanning position represents the verified paragraph content, and does not advance beyond that.
	//move <body> tag over.
	strbSnew.Append("<body>");
	s = s.Substring(6);
	bool startsWithCR = false;
	//todo: handle one leading CR if there is no text preceding it.
	if (s.StartsWith("\n")) {
		startsWithCR = true;
	}
	string tagName;
	Match tagCurMatch;
	while (true) //loop to either construct a paragraph, or to immediately add the next tag to strbSnew.
	{
		iScanInParagraph = s.IndexOf("<", iScanInParagraph); //Advance the scanner to the start of the next tag
		if (iScanInParagraph == -1) //there aren't any more tags, so current paragraph goes to end of string. This won't happen
		{
			throw new ApplicationException(Lans.g("WikiPages", "No tags found."));
			//strbSnew.Append(ProcessParagraph(s));
		}
		if (s.Substring(iScanInParagraph).StartsWith("</body>")) {
			//End of document: flush whatever paragraph text precedes </body> and stop scanning.
			strbSnew.Append(ProcessParagraph(s.Substring(0, iScanInParagraph), startsWithCR));
			//startsWithCR=false;
			//strbSnew.Append("</body>");
			s = "";
			iScanInParagraph = 0;
			break;
		}
		tagName = "";
		tagCurMatch = Regex.Match(s.Substring(iScanInParagraph), "^<.*?>"); //regMatch);//.*? means any char, zero or more, as few as possible
		if (tagCurMatch == null) {
			//shouldn't happen unless closing bracket is missing
			throw new ApplicationException(Lans.g("WikiPages", "Unexpected tag:") + " " + s.Substring(iScanInParagraph));
		}
		if (tagCurMatch.Value.Trim('<', '>').EndsWith("/")) {
			//self terminating tags NOT are allowed
			//this should catch all non-allowed self-terminating tags i.e. <br />, <inherits />, etc...
			throw new ApplicationException(Lans.g("WikiPages", "All elements must have a beginning and ending tag. Unexpected tag:") + " " + s.Substring(iScanInParagraph));
		}
		//Nesting of identical tags causes problems:
		//<h1><h1>some text</h1></h1>
		//The first <h1> will match with the first </h1>.
		//We don't have time to support this outlier, so we will catch it in the validator when they save.
		//One possible strategy here might be:
		//idxNestedDuplicate=s.IndexOf("<"+tagName+">");
		//if(idxNestedDuplicate<s.IndexOf("</"+tagName+">"){
		//
		//}
		//Another possible strategy might be to use regular expressions.
		tagName = tagCurMatch.Value.Split(new string[] { "<", " ", ">" }, StringSplitOptions.RemoveEmptyEntries)[0]; //works with tags like <i>, <span ...>, and <img .../>
		if (s.IndexOf("</" + tagName + ">") == -1) //this will happen if no ending tag.
		{
			throw new ApplicationException(Lans.g("WikiPages", "No ending tag:") + " " + s.Substring(iScanInParagraph));
		}
		switch (tagName) {
			//Inline tags: allowed inside a paragraph, so just skip past the closing tag and keep scanning.
			case "a":
			case "b":
			case "div":
			case "i":
			case "span":
				iScanInParagraph = s.IndexOf("</" + tagName + ">", iScanInParagraph) + 3 + tagName.Length; //+3 for "</"+">"
				continue; //continues scanning this paragraph.
			//Block-level tags: terminate any paragraph being assembled and copy the whole element over verbatim.
			case "h1":
			case "h2":
			case "h3":
			case "ol":
			case "ul":
			case "table":
			case "img": //can NOT be self-terminating
				if (iScanInParagraph == 0) //s starts with a non-paragraph tag, so there is no partially assembled paragraph to process.
				//do nothing
				{
				}
				else //we are already part way into assembling a paragraph.
				{
					strbSnew.Append(ProcessParagraph(s.Substring(0, iScanInParagraph), startsWithCR));
					startsWithCR = false; //subsequent paragraphs will not need this
					s = s.Substring(iScanInParagraph); //chop off start of s
					iScanInParagraph = 0;
				}
				//scan to the end of this element
				int iScanSibling = s.IndexOf("</" + tagName + ">") + 3 + tagName.Length; //tags without a closing tag were caught above.
				//move the non-paragraph content over to s new.
				strbSnew.Append(s.Substring(0, iScanSibling));
				s = s.Substring(iScanSibling);
				//scanning will start a totally new paragraph
				break;
			default:
				if (isEmail) {
					//Emails tolerate arbitrary inline html tags; treat them like the inline cases above.
					iScanInParagraph = s.IndexOf("</" + tagName + ">", iScanInParagraph) + 3 + tagName.Length;
					continue; //continues scanning this paragraph
				}
				throw new ApplicationException(Lans.g("WikiPages", "Unexpected tag:") + " " + s.Substring(iScanInParagraph));
		}
	}
	strbSnew.Append("</body>");
	#endregion
	#region aggregation
	//Round-trip through XmlDocument/XmlWriter to pretty-print (tab indent, \n newlines) and re-validate the assembled body.
	doc = new XmlDocument();
	using (StringReader reader = new StringReader(strbSnew.ToString())) {
		doc.Load(reader);
	}
	StringBuilder strbOut = new StringBuilder();
	XmlWriterSettings settings = new XmlWriterSettings();
	settings.Indent = true;
	settings.IndentChars = "\t";
	settings.OmitXmlDeclaration = true;
	settings.NewLineChars = "\n";
	using (XmlWriter writer = XmlWriter.Create(strbOut, settings)) {
		doc.WriteTo(writer);
	}
	//spaces can't be handled prior to this point because crashes the xml parser.
	//NOTE(review): the five Replace calls below look like no-ops/odd replacements as written; the replacement strings were
	//almost certainly non-breaking-space entities that were decoded away when this file was exported. Confirm against source control.
	strbOut.Replace(" ", " "); //handle extra spaces.
	strbOut.Replace("<td></td>", "<td> </td>"); //force blank table cells to show not collapsed
	strbOut.Replace("<th></th>", "<th> </th>"); //and blank table headers
	strbOut.Replace("{{nbsp}}", " "); //couldn't add the earlier because
	strbOut.Replace("<p></p>", "<p> </p>"); //probably redundant but harmless
	//aggregate with master
	if (isEmail) {
		if (canAggregate) {
			//Insert the body into the email master template at its @@@body@@@ placeholder.
			s = PrefC.GetString(PrefName.EmailMasterTemplate).Replace("@@@body@@@", strbOut.ToString());
			return(s);
		}
		return(strbOut.ToString());
	}
	else {
		//Insert the body into the wiki master page at its @@@body@@@ placeholder.
		s = WikiPages.MasterPage.PageContent.Replace("@@@body@@@", strbOut.ToString());
	}
	#endregion aggregation
	/*
	//js This code is buggy. It will need very detailed comments and careful review before/if we ever turn it back on.
	if(isPreviewOnly) {
		//do not change cursor from pointer to IBeam to Hand as you move the cursor around the preview page
		s=s.Replace("*{\n\t","*{\n\tcursor:default;\n\t");
		//do not underline links if you hover over them in the preview window
		s=s.Replace("a:hover{\n\ttext-decoration:underline;","a:hover{\n\t");
	}*/
	return(s);
}