/// <summary>Pretty-prints a DOM Document.</summary>
internal virtual void PrettyPrint(IDocument doc, Stream Output)
{
    Out o = new OutImpl();
    PPrint pprint;
    Node document;

    if (!(doc is DomDocumentImpl))
    {
        return;
    }

    document = ((DomDocumentImpl)doc).Adaptee;

    o.State = StreamIn.FSM_ASCII;
    o.Encoding = _options.CharEncoding;

    if (Output != null)
    {
        pprint = new PPrint(_options);
        o.Output = Output;

        if (_options.XmlTags)
        {
            pprint.PrintXmlTree(o, (short)0, 0, null, document);
        }
        else
        {
            pprint.PrintTree(o, (short)0, 0, null, document);
        }

        pprint.FlushLine(o, 0);
    }
}
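/*
  Example (hypothetical): how a caller might drive PrettyPrint. A minimal
  sketch, assuming the method above lives on a class named Tidy, that the
  caller can reach the internal member (e.g. from the same assembly), that
  an IDocument backed by DomDocumentImpl is already available, and that the
  same using directives as the surrounding code (System, System.IO) are in
  scope. None of these names beyond those used above are confirmed here.
*/
internal static class PrettyPrintExample
{
    internal static void Run(Tidy tidy, IDocument doc)
    {
        using (Stream output = new FileStream("pretty.html", FileMode.Create))
        {
            /* Writes the document's tree to the stream using the configured
               options; documents not backed by DomDocumentImpl are ignored. */
            tidy.PrettyPrint(doc, output);
        }
    }
}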
public virtual void CreateSlides(Lexer lexer, Node root)
{
    Node body;
    string buf;
    Out output = new OutImpl();

    body = root.FindBody(lexer.Options.tt);
    count = CountSlides(body);
    slidecontent = body.Content;
    AddTransitionEffect(lexer, root, EFFECT_BLEND, 3.0);

    for (slide = 1; slide <= count; ++slide)
    {
        buf = "slide" + slide + ".html";
        output.State = StreamIn.FSM_ASCII;
        output.Encoding = _options.CharEncoding;

        try
        {
            output.Output = new FileStream(buf, FileMode.Create);
            PrintTree(output, (short)0, 0, lexer, root);
            FlushLine(output, 0);
            output.Output.Close();
        }
        catch (IOException e)
        {
            Console.Error.WriteLine(buf + e.ToString());
        }
    }

    /*
      delete superfluous slides by deleting slideN.html
      for N = count+1, count+2, etc. until no such file is found.
    */
    for (;;)
    {
        buf = "slide" + slide + ".html";
        string path = new FileInfo(buf).FullName;
        bool deleted;

        if (File.Exists(path))
        {
            File.Delete(path);
            deleted = true;
        }
        else if (Directory.Exists(path))
        {
            Directory.Delete(path);
            deleted = true;
        }
        else
        {
            deleted = false;
        }

        if (!deleted)
        {
            break;
        }

        ++slide;
    }
}
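/*
  The cleanup loop above removes slideN.html files left over from an earlier
  run that produced more slides. A standalone sketch of the same idea; the
  file-name pattern and starting index are illustrative, not taken from this
  class, and System.IO is assumed to be in scope.
*/
internal static class StaleSlideCleanupExample
{
    internal static void RemoveStaleSlides(int firstStale)
    {
        /* Delete slideN.html for N = firstStale, firstStale + 1, ...
           and stop at the first file that does not exist. */
        for (int n = firstStale; ; ++n)
        {
            string path = "slide" + n + ".html";

            if (!File.Exists(path))
            {
                break;
            }

            File.Delete(path);
        }
    }
}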
/// <summary>
/// Internal routine that actually does the parsing. The caller
/// can pass either an input Stream or a file name. If both are passed,
/// the file name is preferred.
/// </summary>
internal Node ParseInternal(Stream input, string file, Stream Output, TidyMessageCollection messages)
{
    Lexer lexer;
    Node document = null;
    Node doctype;
    Out o = new OutImpl(); /* normal output stream */
    PPrint pprint;

    /* ensure config is self-consistent */
    _options.Adjust();

    if (file != null)
    {
        input = new FileStream(file, FileMode.Open, FileAccess.Read);
    }
    else if (input == null)
    {
        input = Console.OpenStandardInput();
    }

    if (input != null)
    {
        lexer = new Lexer(new ClsStreamInImpl(input, _options.CharEncoding, _options.TabSize), _options);
        lexer.messages = messages;

        /*
          store pointer to lexer in input stream to allow
          character encoding errors to be reported
        */
        lexer.input.Lexer = lexer;

        /* Tidy doesn't alter the doctype for generic XML docs */
        if (_options.XmlTags)
        {
            document = ParserImpl.parseXMLDocument(lexer);
        }
        else
        {
            document = ParserImpl.parseDocument(lexer);

            if (!document.CheckNodeIntegrity())
            {
                Report.BadTree(lexer);
                return null;
            }

            Clean cleaner = new Clean(_options.tt);

            /* simplifies <b><b> ... </b> ...</b> etc. */
            cleaner.NestedEmphasis(document);

            /* cleans up <dir>indented text</dir> etc. */
            cleaner.List2BQ(document);
            cleaner.BQ2Div(document);

            /* replaces i by em and b by strong */
            if (_options.LogicalEmphasis)
            {
                cleaner.EmFromI(document);
            }

            if (_options.Word2000 && cleaner.IsWord2000(document, _options.tt))
            {
                /* prune Word2000's <![if ...]> ... <![endif]> */
                cleaner.DropSections(lexer, document);

                /* drop style & class attributes and empty p, span elements */
                cleaner.CleanWord2000(lexer, document);
            }

            /* replaces presentational markup by style rules */
            if (_options.MakeClean || _options.DropFontTags)
            {
                cleaner.CleanTree(lexer, document);
            }

            if (!document.CheckNodeIntegrity())
            {
                Report.BadTree(lexer);
                return null;
            }

            doctype = document.FindDocType();

            if (document.Content != null)
            {
                if (_options.Xhtml)
                {
                    lexer.SetXhtmlDocType(document);
                }
                else
                {
                    lexer.FixDocType(document);
                }

                if (_options.TidyMark)
                {
                    lexer.AddGenerator(document);
                }
            }

            /* ensure presence of initial <?xml version="1.0"?> */
            if (_options.XmlOut && _options.XmlPi)
            {
                lexer.FixXmlPI(document);
            }

            if (document.Content != null)
            {
                Report.ReportVersion(lexer, doctype);
                Report.ReportNumWarnings(lexer);
            }
        }

        /* close the input stream, but only if we created it from the file name */
        if (file != null)
        {
            try
            {
                input.Close();
            }
            catch (IOException)
            {
            }
        }

        if (lexer.messages.Errors > 0)
        {
            Report.NeedsAuthorIntervention(lexer);
        }

        o.State = StreamIn.FSM_ASCII;
        o.Encoding = _options.CharEncoding;

        if (lexer.messages.Errors == 0)
        {
            if (_options.BurstSlides)
            {
                Node body;

                /*
                  remove doctype to avoid potential clash with
                  markup introduced when bursting into slides
                */
                doctype = document.FindDocType();

                if (doctype != null)
                {
                    Node.DiscardElement(doctype);
                }

                /* slides use transitional features */
                lexer.versions |= HtmlVersion.Html40Loose;

                /* and patch up doctype to match */
                if (_options.Xhtml)
                {
                    lexer.SetXhtmlDocType(document);
                }
                else
                {
                    lexer.FixDocType(document);
                }

                /* find the body element, which may be implicit */
                body = document.FindBody(_options.tt);

                if (body != null)
                {
                    pprint = new PPrint(_options);
                    Report.ReportNumberOfSlides(lexer, pprint.CountSlides(body));
                    pprint.CreateSlides(lexer, document);
                }
                else
                {
                    Report.MissingBody(lexer);
                }
            }
            else if (Output != null)
            {
                pprint = new PPrint(_options);
                o.Output = Output;

                if (_options.XmlTags)
                {
                    pprint.PrintXmlTree(o, (short)0, 0, lexer, document);
                }
                else
                {
                    pprint.PrintTree(o, (short)0, 0, lexer, document);
                }

                pprint.FlushLine(o, 0);
            }
        }

        Report.ErrorSummary(lexer);
    }

    return document;
}
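/*
  Example (hypothetical): invoking ParseInternal to tidy a file into an output
  stream. A minimal sketch, assuming the enclosing class is named Tidy, that
  it and TidyMessageCollection are constructible with no arguments, that the
  caller sits in the same assembly, and that System and System.IO are in
  scope; the file names are illustrative.
*/
internal static class ParseInternalExample
{
    internal static void Run(Tidy tidy)
    {
        TidyMessageCollection messages = new TidyMessageCollection();

        using (Stream output = new FileStream("out.html", FileMode.Create))
        {
            /* Passing a file name makes ParseInternal open (and close) the
               input itself; the input stream argument is then ignored. */
            Node document = tidy.ParseInternal(null, "in.html", output, messages);

            Console.Error.WriteLine("Errors: " + messages.Errors);
        }
    }
}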