コード例 #1
0
ファイル: WaebricLexerTest.cs プロジェクト: spreeker/waebric
        public void EmbeddingTest()
        {
            //Lexicalize an input string that contains an embedded part
            WaebricLexer embeddingLexer = new WaebricLexer(new StringReader("\"pre<\"\\\">\">post\""));
            embeddingLexer.LexicalizeStream();

            TokenIterator scannedTokens = embeddingLexer.GetTokenIterator();

            //The whole input should collapse into a single EMBEDDING token
            Assert.AreEqual(1, scannedTokens.GetSize());
            Assert.AreEqual(TokenType.EMBEDDING, scannedTokens.Peek(1).GetType());

            //Unpack the embedding and compare every inner token value in order
            EmbeddingToken embedding = (EmbeddingToken) scannedTokens.NextToken();
            TokenIterator inner = embedding.GetTokenIterator();

            String[] expected = { "\"", "pre", "<", "\\\">", ">", "post", "\"" };
            Assert.AreEqual(expected.Length, inner.GetSize());
            for (int position = 0; position < expected.Length; position++)
            {
                Assert.AreEqual(expected[position], inner.Peek(position + 1).GetValue().ToString());
            }
        }
コード例 #2
0
        public void DoubleFunctionDefinition()
        {
            //Tokenize the test file that defines the same function twice
            WaebricLexer fileLexer = new WaebricLexer(new StreamReader("../../../../TestChecker/WaebricTestFiles/doublefunctiondefinition.wae"));
            fileLexer.LexicalizeStream();

            //Parse the token stream into a syntax tree
            WaebricParser fileParser = new WaebricParser(fileLexer.GetTokenIterator());
            fileParser.Parse();
            SyntaxTree parsedTree = fileParser.GetTree();

            //Point the ModuleCache at the directory holding the test files
            ModuleCache.Instance.SetDirectoryPath("../../../../TestChecker/WaebricTestFiles/");

            //Run the checker over the parsed tree
            List<Exception> checkerExceptions = new WaebricChecker().CheckSyntaxTree(parsedTree);

            //Exactly one FunctionAlreadyDefined exception is expected
            Assert.AreEqual(1, checkerExceptions.Count);
            Assert.AreEqual(typeof(FunctionAlreadyDefined), checkerExceptions[0].GetType());
        }
コード例 #3
0
ファイル: WaebricLexerTest.cs プロジェクト: spreeker/waebric
        public void ComplexEmbeddingTest()
        {
            //Lexicalize an embedding containing markup, an attribute and quoted text
            WaebricLexer embeddingLexer = new WaebricLexer(new StringReader("\"<a(href=\"http://www.microsoft.com\") \"Microsoft Corp\">\""));
            embeddingLexer.LexicalizeStream();

            TokenIterator scannedTokens = embeddingLexer.GetTokenIterator();

            //The whole input should collapse into a single EMBEDDING token
            Assert.AreEqual(1, scannedTokens.GetSize());
            Assert.AreEqual(TokenType.EMBEDDING, scannedTokens.Peek(1).GetType());

            //Unpack the embedding and compare every inner token value in order
            EmbeddingToken embedding = (EmbeddingToken) scannedTokens.NextToken();
            TokenIterator inner = embedding.GetTokenIterator();

            String[] expected = { "\"", "", "<", "a", "(", "href", "=", "http://www.microsoft.com", ")", "Microsoft Corp", ">", "\"" };
            Assert.AreEqual(expected.Length, inner.GetSize());
            for (int position = 0; position < expected.Length; position++)
            {
                Assert.AreEqual(expected[position], inner.Peek(position + 1).GetValue().ToString());
            }
        }
コード例 #4
0
ファイル: WaebricCompiler.cs プロジェクト: spreeker/waebric
        private static String Path; //Path of file to compile

        #endregion Fields

        #region Methods

        /// <summary>
        /// Entry point of the compiler: lexes, parses, checks and finally interprets
        /// the Waebric file whose path is given as the single command line argument.
        /// </summary>
        /// <param name="args">Command line arguments; exactly one input file path is expected</param>
        public static void Main(string[] args)
        {
            Console.WriteLine("Waebric Compiler/Interpreter v1.0");
            Console.WriteLine("---------------------------------");

            if (args.Length == 1)
            {   //There is one file specified.
                Path = args[0];
            }
            else
            {
                //Fix: message said "WeabricCompiler", inconsistent with the message below
                Console.WriteLine("WaebricCompiler: no input file specified.");
                Console.Read(); //Testing purposes only
                return;
            }

            //Let's lexicalize the file; dispose the reader afterwards (it was leaked before)
            TokenIterator tokens;
            using (StreamReader sourceStream = new StreamReader(Path))
            {
                WaebricLexer lexer = new WaebricLexer(sourceStream);
                lexer.LexicalizeStream();
                tokens = lexer.GetTokenIterator();
            }

            if (tokens.GetSize() == 0)
            {   //No tokens parsed
                Console.WriteLine("WaebricCompiler: Empty file or comments only.");
                return; //Nothing to compile so end program
            }

            //Lets parse the file
            WaebricParser parser = new WaebricParser(tokens);
            parser.Parse();

            SyntaxTree parsedTree = parser.GetTree();

            //Initialize ModuleCache with correct DirectoryPath
            ModuleCache.Instance.SetDirectoryPath(GetDirectoryPath());

            //Lets check the tree
            WaebricChecker checker = new WaebricChecker();
            checker.CheckSyntaxTree(parsedTree);

            //Lets interpret the tree and generate XHTML
            WaebricInterpreter interpreter = new WaebricInterpreter();
            interpreter.InterpretAST(parsedTree);
        }
コード例 #5
0
ファイル: ModuleParserTest.cs プロジェクト: spreeker/waebric
        public void ModuleParserComplexModuleNameTest()
        {
            //Tokenize a module header with a dotted (three part) name
            WaebricLexer moduleLexer = new WaebricLexer(new StringReader("module test.test2.test3"));
            moduleLexer.LexicalizeStream();

            //Parse the token stream
            WaebricParser moduleParser = new WaebricParser(moduleLexer.GetTokenIterator());
            moduleParser.Parse();
            SyntaxTree tree = moduleParser.GetTree();

            //The root module must carry all three identifier parts
            Module parsedModule = tree.GetRoot();
            Assert.AreEqual(3, parsedModule.GetModuleId().GetIdentifiers().Count);
            Assert.AreEqual("test.test2.test3", parsedModule.GetModuleId().ToString());
        }
コード例 #6
0
ファイル: ModuleParserTest.cs プロジェクト: spreeker/waebric
        public void ModuleParserImportTest()
        {
            SyntaxTree tree;

            //Create lexer to tokenize stream
            WaebricLexer lexer = new WaebricLexer(new StringReader("module test\n\nimport importtest"));
            lexer.LexicalizeStream();

            //Test if stream is lexicalized into 4 tokens
            Assert.IsTrue(lexer.GetTokenIterator().GetSize() == 4);

            //Retrieve tokenIterator from lexer and lets parse it
            WaebricParser parser = new WaebricParser(lexer.GetTokenIterator());
            parser.Parse();

            //Test tree structure
            tree = parser.GetTree();

            Module module = tree.GetRoot();
            //Fix: the module was retrieved but never verified; assert its name and its single import
            Assert.AreEqual("test", module.GetModuleId().ToString());
            Assert.AreEqual(1, module.GetImports().Count);
        }
コード例 #7
0
        /// <summary>
        /// Lexicalizes a buffer. When lexicalized it adds the tokens to the given list.
        /// </summary>
        /// <param name="tokens">TokenList to add new tokens to</param>
        /// <param name="buffer">Buffer to lexicalize</param>
        /// <param name="line">Linenumber of startposition</param>
        private void LexicalizeBuffer(List<Token> tokens, String buffer, int line)
        {
            if (String.IsNullOrEmpty(buffer))
            {   //No data to scan
                return;
            }

            //Create new lexer and lexicalize the buffer, starting at the given line number
            WaebricLexer lexer = new WaebricLexer(new StringReader(buffer));
            lexer.SetLine(line);
            lexer.LexicalizeStream();

            //Append all scanned tokens in one call instead of a manual index loop
            tokens.AddRange(lexer.GetTokenList());
        }
コード例 #8
0
ファイル: WaebricLexerTest.cs プロジェクト: spreeker/waebric
        public void WaebricSymbolTokenTest()
        {
            //A quoted symbol ('test) should be scanned as one WAEBRICSYMBOL token
            WaebricLexer symbolLexer = new WaebricLexer(new StringReader("'test"));
            symbolLexer.LexicalizeStream();

            TokenIterator scannedTokens = symbolLexer.GetTokenIterator();
            Assert.AreEqual(1, scannedTokens.GetSize());

            Token symbolToken = scannedTokens.NextToken();
            Assert.AreEqual(TokenType.WAEBRICSYMBOL, symbolToken.GetType());
            Assert.AreEqual("test", symbolToken.GetValue().ToString());
        }
コード例 #9
0
 /// <summary>
 /// Test teardown: releases the shared lexer so each test starts from a clean state.
 /// </summary>
 public void CleanUp()
 {
     lexer = null;
 }
コード例 #10
0
        /// <summary>
        /// Test helper: tokenizes the given input and hands back an iterator over the result.
        /// </summary>
        /// <param name="stream">Stream to lexicalize</param>
        /// <returns>TokenIterator over the scanned tokens</returns>
        private TokenIterator Init(String stream)
        {
            //Build a fresh lexer for this input and run the scanner
            lexer = new WaebricLexer(new StringReader(stream));
            lexer.LexicalizeStream();
            return lexer.GetTokenIterator();
        }
コード例 #11
0
        public void WaebricCheckerImportTest()
        {
            //Tokenize the root test file
            WaebricLexer fileLexer = new WaebricLexer(new StreamReader("../../../../TestChecker/WaebricTestFiles/home.wae"));
            fileLexer.LexicalizeStream();

            //Parse the token stream into a syntax tree
            WaebricParser fileParser = new WaebricParser(fileLexer.GetTokenIterator());
            fileParser.Parse();
            SyntaxTree parsedTree = fileParser.GetTree();

            //Point the ModuleCache at the directory that holds the imported modules
            ModuleCache.Instance.SetDirectoryPath("../../../../TestChecker/WaebricTestFiles/");

            //Checking the tree should raise no exceptions
            List<Exception> checkerExceptions = new WaebricChecker().CheckSyntaxTree(parsedTree);
            Assert.AreEqual(0, checkerExceptions.Count);

            //Every imported module (all except the tree root) must end up in the cache
            Assert.IsTrue(ModuleCache.Instance.ContainsModule("first"));
            Assert.IsTrue(ModuleCache.Instance.ContainsModule("second"));
            Assert.IsTrue(ModuleCache.Instance.ContainsModule("common"));
        }
コード例 #12
0
ファイル: ModuleParserTest.cs プロジェクト: spreeker/waebric
        public void ModuleParserSingleModuleTest()
        {
            //Tokenize a minimal module header
            WaebricLexer moduleLexer = new WaebricLexer(new StringReader("module test"));
            moduleLexer.LexicalizeStream();

            //Parse the tokens into a syntax tree
            WaebricParser moduleParser = new WaebricParser(moduleLexer.GetTokenIterator());
            moduleParser.Parse();
            SyntaxTree tree = moduleParser.GetTree();

            //The root module must carry exactly the single identifier "test"
            Module parsedModule = tree.GetRoot();
            String[] identifiers = parsedModule.GetModuleId().GetIdentifiers().ToArray();
            Assert.AreEqual(1, identifiers.Length);
            Assert.AreEqual("test", identifiers[0]);
        }
コード例 #13
0
ファイル: ModuleCache.cs プロジェクト: spreeker/waebric
        /// <summary>
        /// Request a specified module: returns the cached instance when available,
        /// otherwise loads, lexes and parses the module file and caches the result.
        /// </summary>
        /// <param name="identifier">ModuleId of requested module</param>
        /// <returns>Requested module if available</returns>
        public Module RequestModule(ModuleId identifier)
        {
            if (ModuleTable.ContainsKey(identifier))
            {   //Module already loaded so return instance of module
                return (Module) ModuleTable[identifier];
            }

            //Module not cached, so load it; dispose the reader once scanning is done (it was leaked before)
            WaebricLexer lexer;
            using (StreamReader moduleStream = new StreamReader(GetPath(identifier)))
            {
                lexer = new WaebricLexer(moduleStream);
                lexer.LexicalizeStream();
            }

            //Parse the scanned tokens
            WaebricParser parser = new WaebricParser(lexer.GetTokenIterator());
            parser.Parse();

            //Get module of tree
            SyntaxTree tree = parser.GetTree();

            //Add module to hashtable
            Module requestedModule = tree.GetRoot();
            ModuleTable.Add(identifier, requestedModule);

            return requestedModule;
        }
コード例 #14
0
ファイル: ModuleParserTest.cs プロジェクト: spreeker/waebric
        public void ModuleParserSiteTest()
        {
            //Tokenize a module containing a site section with two mappings
            WaebricLexer siteLexer = new WaebricLexer(new StringReader("module test\n\nsite\n  site/index.html : home() ; site/index2.html : home()\nend"));
            siteLexer.LexicalizeStream();

            //Parse the module directly with the ModuleParser
            ModuleParser moduleParser = new ModuleParser(siteLexer.GetTokenIterator());
            SyntaxTree tree = new SyntaxTree();
            tree.SetRoot(moduleParser.ParseModule());

            //Verify the module shell: named "test", nothing in it but one site
            Module parsedModule = tree.GetRoot();
            Assert.IsTrue(parsedModule.GetModuleId().ToString() == "test");
            Assert.AreEqual(0, parsedModule.GetImports().Count); //No imports
            Assert.AreEqual(0, parsedModule.GetFunctionDefinitions().Count); //No function definitions
            Assert.AreEqual(1, parsedModule.GetSites().Count); //One site

            //The single site must hold both mappings
            Site parsedSite = (Site) parsedModule.GetSites().Get(0);
            Assert.AreEqual(2, parsedSite.GetMappings().Count);
        }
コード例 #15
0
ファイル: WaebricLexer.cs プロジェクト: spreeker/waebric
        /// <summary>
        /// Lexicalizes a buffer. When lexicalized it adds the tokens to the given list.
        /// </summary>
        /// <param name="tokens">TokenList to add new tokens to</param>
        /// <param name="buffer">Buffer to lexicalize</param>
        /// <param name="line">Linenumber of startposition</param>
        private void LexicalizeBuffer(List<Token> tokens, String buffer, int line)
        {
            if (String.IsNullOrEmpty(buffer))
            {   //No data to scan
                return;
            }

            //Create new lexer and lexicalize the buffer, starting at the given line number
            WaebricLexer lexer = new WaebricLexer(new StringReader(buffer));
            lexer.SetLine(line);
            lexer.LexicalizeStream();

            //Append all scanned tokens in one call instead of a manual index loop
            tokens.AddRange(lexer.GetTokenList());
        }
コード例 #16
0
ファイル: WaebricLexerTest.cs プロジェクト: spreeker/waebric
        public void TestComplexStream()
        {
            //A module with a site mapping should scan to exactly 13 tokens
            WaebricLexer streamLexer = new WaebricLexer(new StringReader("module test\n\nsite site/index.html : home()\nend"));
            streamLexer.LexicalizeStream();

            Assert.IsTrue(13 == streamLexer.GetTokenList().Count);
        }
コード例 #17
0
ファイル: WaebricLexerTest.cs プロジェクト: spreeker/waebric
        public void SingleQuoteTest()
        {
            //A lone double quote must be scanned as a single SYMBOL token
            WaebricLexer quoteLexer = new WaebricLexer(new StringReader("\""));
            quoteLexer.LexicalizeStream();

            TokenIterator scannedTokens = quoteLexer.GetTokenIterator();
            Assert.AreEqual(1, scannedTokens.GetSize());
            Assert.AreEqual(TokenType.SYMBOL, scannedTokens.Peek(1).GetType());
            Assert.AreEqual("\"", scannedTokens.Peek(1).GetValue().ToString());
        }
コード例 #18
0
ファイル: WaebricLexerTest.cs プロジェクト: spreeker/waebric
        public void SingleLineCommentTest()
        {
            //A single line comment should be skipped entirely by the lexer
            WaebricLexer commentLexer = new WaebricLexer(new StringReader("//this is a comment"));
            commentLexer.LexicalizeStream();

            Assert.AreEqual(0, commentLexer.GetTokenIterator().GetSize());
        }
コード例 #19
0
ファイル: WaebricLexer.cs プロジェクト: spreeker/waebric
        /// <summary>
        /// Lexicalizes a quoted ("...") section of the stream. Emits a TEXT token for plain
        /// quoted text, delegates to LexicalizeEmbedding when a '&lt;' is found inside the quotes,
        /// and — when EOF is reached before a closing, non-escaped quote — backtracks by emitting
        /// a SYMBOL quote token and re-scanning the collected remainder as ordinary tokens.
        /// </summary>
        /// <exception cref="StreamTokenizerException">Thrown when text following the "comment" keyword contains invalid characters</exception>
        private void LexicalizeQuote()
        {
            //Store current line number location for backtracking
            int tempLine = tokenizer.GetScannedLines();
            //Hold previous char for recognizing escape chars
            char previousChar = '\0';
            bool IsString = false;

            //Skip " token, only text is interesting
            CurrentToken = tokenizer.NextToken();

            //Ignore comments, due urls, etc
            tokenizer.SetIgnoreComments(true);

            //Check if this text is comment text (previous token was the "comment" keyword)
            Token[] tempArray = TokenStream.ToArray();
            if (tempArray[tempArray.Length - 1].GetType() == TokenType.KEYWORD && tempArray[tempArray.Length - 1].GetValue().ToString() == "comment")
            {
                IsString = true;
            }

            //Retrieve possible quoted text
            StringBuilder stringBuilder = new StringBuilder();
            tokenizer.SetIgnoreNumeric(true);
            while (tokenizer.GetCharacterValue() != '\"' || previousChar == '\\') //Scan until non escaped " found
            {
                if(CurrentToken == StreamTokenizer.EOF)
                {   // End of file, so it wasn't a quoted part but just a single "
                    tokenizer.SetIgnoreComments(false);
                    tokenizer.SetIgnoreNumeric(false);

                    //First add a single quote as token
                    TokenStream.Add(new Token("\"", TokenType.SYMBOL, tempLine));

                    //Second, scan remaining string
                    WaebricLexer tempLexer = new WaebricLexer(new StringReader(stringBuilder.ToString()));
                    tempLexer.LexicalizeStream();
                    List<Token> tempTokenList = tempLexer.GetTokenList();

                    //Add all tokens to stream, shifting line numbers back to this quote's position
                    foreach(Token currentToken in tempTokenList)
                    {
                        TokenStream.Add(new Token(currentToken.GetValue(), currentToken.GetType(), (currentToken.GetLine()+tempLine)));
                    }

                    return; //Lexicalizing done
                }
                else if(tokenizer.GetCharacterValue() == '<' && !IsString)
                { //Embedding found, so lexicalize embedding
                    LexicalizeEmbedding(stringBuilder.ToString());
                    tokenizer.SetIgnoreComments(false);
                    tokenizer.SetIgnoreNumeric(false);
                    return;
                }

                //Get next part and add it to stringBuilder
                stringBuilder.Append(tokenizer.ToString());

                previousChar = tokenizer.GetCharacterValue();
                CurrentToken = tokenizer.NextToken();
            }
            tokenizer.SetIgnoreComments(false);
            tokenizer.SetIgnoreNumeric(false);
            //Check if string is correct quote text
            if (IsString)
            {
                if (!IsCorrectString(stringBuilder.ToString()))
                {
                    //Fix: message typo "containts" -> "contains"
                    throw new StreamTokenizerException("String Text contains non valid characters", tempLine);
                }
            }

            TokenStream.Add(new Token(stringBuilder.ToString(), TokenType.TEXT, tempLine));

            //Skip " token, only text is interesting
            CurrentToken = tokenizer.NextToken();
        }
コード例 #20
0
        /// <summary>
        /// Lexicalizes a quoted ("...") section of the stream. Emits a TEXT token for plain
        /// quoted text, delegates to LexicalizeEmbedding when a '&lt;' is found inside the quotes,
        /// and — when EOF is reached before a closing, non-escaped quote — backtracks by emitting
        /// a SYMBOL quote token and re-scanning the collected remainder as ordinary tokens.
        /// </summary>
        /// <exception cref="StreamTokenizerException">Thrown when text following the "comment" keyword contains invalid characters</exception>
        private void LexicalizeQuote()
        {
            //Store current line number location for backtracking
            int tempLine = tokenizer.GetScannedLines();
            //Hold previous char for recognizing escape chars
            char previousChar = '\0';
            bool IsString     = false;

            //Skip " token, only text is interesting
            CurrentToken = tokenizer.NextToken();

            //Ignore comments, due urls, etc
            tokenizer.SetIgnoreComments(true);

            //Check if this text is comment text (previous token was the "comment" keyword)
            Token[] tempArray = TokenStream.ToArray();
            if (tempArray[tempArray.Length - 1].GetType() == TokenType.KEYWORD && tempArray[tempArray.Length - 1].GetValue().ToString() == "comment")
            {
                IsString = true;
            }

            //Retrieve possible quoted text
            StringBuilder stringBuilder = new StringBuilder();

            tokenizer.SetIgnoreNumeric(true);
            while (tokenizer.GetCharacterValue() != '\"' || previousChar == '\\') //Scan until non escaped " found
            {
                if (CurrentToken == StreamTokenizer.EOF)
                {   // End of file, so it wasn't a quoted part but just a single "
                    tokenizer.SetIgnoreComments(false);
                    tokenizer.SetIgnoreNumeric(false);

                    //First add a single quote as token
                    TokenStream.Add(new Token("\"", TokenType.SYMBOL, tempLine));

                    //Second, scan remaining string
                    WaebricLexer tempLexer = new WaebricLexer(new StringReader(stringBuilder.ToString()));
                    tempLexer.LexicalizeStream();
                    List <Token> tempTokenList = tempLexer.GetTokenList();

                    //Add all tokens to stream, shifting line numbers back to this quote's position
                    foreach (Token currentToken in tempTokenList)
                    {
                        TokenStream.Add(new Token(currentToken.GetValue(), currentToken.GetType(), (currentToken.GetLine() + tempLine)));
                    }

                    return; //Lexicalizing done
                }
                else if (tokenizer.GetCharacterValue() == '<' && !IsString)
                { //Embedding found, so lexicalize embedding
                    LexicalizeEmbedding(stringBuilder.ToString());
                    tokenizer.SetIgnoreComments(false);
                    tokenizer.SetIgnoreNumeric(false);
                    return;
                }

                //Get next part and add it to stringBuilder
                stringBuilder.Append(tokenizer.ToString());

                previousChar = tokenizer.GetCharacterValue();
                CurrentToken = tokenizer.NextToken();
            }
            tokenizer.SetIgnoreComments(false);
            tokenizer.SetIgnoreNumeric(false);
            //Check if string is correct quote text
            if (IsString)
            {
                if (!IsCorrectString(stringBuilder.ToString()))
                {
                    //Fix: message typo "containts" -> "contains"
                    throw new StreamTokenizerException("String Text contains non valid characters", tempLine);
                }
            }

            TokenStream.Add(new Token(stringBuilder.ToString(), TokenType.TEXT, tempLine));

            //Skip " token, only text is interesting
            CurrentToken = tokenizer.NextToken();
        }