Code Example #1
File: Type1FontParser.cs    Project: rbanks54/PdfPig
        /// <summary>
        /// Parses an embedded Adobe Type 1 font file.
        /// </summary>
        /// <param name="inputBytes">The bytes of the font program.</param>
        /// <param name="length1">The length in bytes of the clear text portion of the font program.</param>
        /// <param name="length2">The length in bytes of the encrypted portion of the font program.</param>
        /// <returns>The parsed type 1 font.</returns>
        public Type1FontProgram Parse(IInputBytes inputBytes, int length1, int length2)
        {
            // Sometimes the entire PFB file, including the header bytes, is embedded, which prevents parsing in the normal way.
            var isEntirePfbFile = inputBytes.Peek() == PfbFileIndicator;

            IReadOnlyList<byte> eexecPortion = new byte[0];

            if (isEntirePfbFile)
            {
                var (ascii, binary) = ReadPfbHeader(inputBytes);

                eexecPortion = binary;
                inputBytes   = new ByteArrayInputBytes(ascii);
            }

            var scanner = new CoreTokenScanner(inputBytes);

            if (!scanner.TryReadToken(out CommentToken comment) || !comment.Data.StartsWith("!"))
            {
                throw new InvalidFontFormatException("The Type1 program did not start with '%!'.");
            }

            string name;
            var    parts = comment.Data.Split(new[] { " " }, StringSplitOptions.RemoveEmptyEntries);

            if (parts.Length == 3)
            {
                name = parts[1];
            }
            else
            {
                name = "Unknown";
            }

            var comments = new List<string>();

            while (scanner.MoveNext() && scanner.CurrentToken is CommentToken commentToken)
            {
                comments.Add(commentToken.Data);
            }

            var dictionaries = new List<DictionaryToken>();

            // Override arrays and names since type 1 handles these differently.
            var arrayTokenizer = new Type1ArrayTokenizer();
            var nameTokenizer  = new Type1NameTokenizer();

            scanner.RegisterCustomTokenizer((byte)'{', arrayTokenizer);
            scanner.RegisterCustomTokenizer((byte)'/', nameTokenizer);

            try
            {
                var tempEexecPortion = new List<byte>();
                var tokenSet         = new PreviousTokenSet();
                tokenSet.Add(scanner.CurrentToken);
                while (scanner.MoveNext())
                {
                    if (scanner.CurrentToken is OperatorToken operatorToken)
                    {
                        if (Equals(scanner.CurrentToken, OperatorToken.Eexec))
                        {
                            int offset = 0;

                            while (inputBytes.MoveNext())
                            {
                                if (inputBytes.CurrentByte == (byte)ClearToMark[offset])
                                {
                                    offset++;
                                }
                                else
                                {
                                    if (offset > 0)
                                    {
                                        for (int i = 0; i < offset; i++)
                                        {
                                            tempEexecPortion.Add((byte)ClearToMark[i]);
                                        }
                                    }

                                    offset = 0;
                                }

                                if (offset == ClearToMark.Length)
                                {
                                    break;
                                }

                                if (offset > 0)
                                {
                                    continue;
                                }

                                tempEexecPortion.Add(inputBytes.CurrentByte);
                            }
                        }
                        else
                        {
                            HandleOperator(operatorToken, scanner, tokenSet, dictionaries);
                        }
                    }

                    tokenSet.Add(scanner.CurrentToken);
                }

                if (!isEntirePfbFile)
                {
                    eexecPortion = tempEexecPortion;
                }
            }
            finally
            {
                scanner.DeregisterCustomTokenizer(arrayTokenizer);
                scanner.DeregisterCustomTokenizer(nameTokenizer);
            }

            var encoding    = GetEncoding(dictionaries);
            var matrix      = GetFontMatrix(dictionaries);
            var boundingBox = GetBoundingBox(dictionaries);

            var (privateDictionary, charStrings) = encryptedPortionParser.Parse(eexecPortion, false);

            return new Type1FontProgram(name, encoding, matrix, boundingBox ?? new PdfRectangle(), privateDictionary, charStrings);
        }
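A minimal usage sketch for the Parse overload above. The variable names, the helper, and the parser's constructor are assumptions for illustration (these parser types are internal to PdfPig); the length1 and length2 values correspond to the /Length1 and /Length2 entries of the embedded FontFile stream in the PDF.

        // Illustrative only: constructor and helper names are assumptions, not PdfPig's documented public API.
        byte[] fontProgram = GetEmbeddedFontFileBytes();   // hypothetical helper returning the decoded /FontFile stream contents
        int length1 = 1000;                                // clear-text length, from the stream's /Length1 entry
        int length2 = 2000;                                // encrypted length, from the stream's /Length2 entry

        var parser = new Type1FontParser(new Type1EncryptedPortionParser()); // constructor assumed
        Type1FontProgram font = parser.Parse(new ByteArrayInputBytes(fontProgram), length1, length2);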
Code Example #2
File: Type1FontParser.cs    Project: lulzzz/PdfPig
        private void HandleOperator(OperatorToken token, IInputBytes bytes, ISeekableTokenScanner scanner, PreviousTokenSet set, List<DictionaryToken> dictionaries)
        {
            switch (token.Data)
            {
            case "dict":
                var number     = ((NumericToken)set[0]).Int;
                var dictionary = ReadDictionary(number, scanner);

                dictionaries.Add(dictionary);
                break;

            case "currentfile":
                if (!scanner.MoveNext() || scanner.CurrentToken != OperatorToken.Eexec)
                {
                    return;
                }

                // For now we will not read this stuff.
                SkipEncryptedContent(bytes);
                break;

            default:
                return;
            }
        }
Code Example #3
File: Type1FontParser.cs    Project: rbanks54/PdfPig
        private static void HandleOperator(OperatorToken token, ISeekableTokenScanner scanner, PreviousTokenSet set, List<DictionaryToken> dictionaries)
        {
            switch (token.Data)
            {
            case "dict":
                var number     = ((NumericToken)set[0]).Int;
                var dictionary = ReadDictionary(number, scanner);

                dictionaries.Add(dictionary);
                break;

            default:
                return;
            }
        }
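The "dict" case in both overloads reads the dictionary size from the token seen immediately before the operator, since a Type 1 program declares dictionaries as, for example, "/FontInfo 9 dict". Below is a minimal sketch of what a previous-token buffer like PreviousTokenSet could look like; it is an assumption for illustration, not PdfPig's actual implementation.

        // Illustrative sketch only: PdfPig's real PreviousTokenSet may keep more history.
        // It shows why set[0] yields the NumericToken that precedes the "dict" operator.
        internal class PreviousTokenSetSketch
        {
            private IToken previous;

            // Record each token as the scanner consumes it.
            public void Add(IToken token)
            {
                previous = token;
            }

            // Index 0 is the most recently recorded token, e.g. the "9" in "/FontInfo 9 dict".
            public IToken this[int index] => previous;
        }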
Code Example #4
File: Type1FontParser.cs    Project: lulzzz/PdfPig
        /// <summary>
        /// Parses an embedded Adobe Type 1 font file.
        /// </summary>
        /// <param name="inputBytes">The bytes of the font program.</param>
        /// <returns>The parsed type 1 font.</returns>
        public Type1Font Parse(IInputBytes inputBytes)
        {
            var scanner = new CoreTokenScanner(inputBytes);

            if (!scanner.TryReadToken(out CommentToken comment) || !comment.Data.StartsWith("!"))
            {
                throw new InvalidFontFormatException("The Type1 program did not start with '%!'.");
            }

            string name;
            var    parts = comment.Data.Split(new[] { " " }, StringSplitOptions.RemoveEmptyEntries);

            if (parts.Length == 3)
            {
                name = parts[1];
            }
            else
            {
                name = "Unknown";
            }

            var comments = new List<string>();

            while (scanner.MoveNext() && scanner.CurrentToken is CommentToken commentToken)
            {
                comments.Add(commentToken.Data);
            }

            var dictionaries = new List<DictionaryToken>();

            // Override arrays and names since type 1 handles these differently.
            var arrayTokenizer = new Type1ArrayTokenizer();
            var nameTokenizer  = new Type1NameTokenizer();

            scanner.RegisterCustomTokenizer((byte)'{', arrayTokenizer);
            scanner.RegisterCustomTokenizer((byte)'/', nameTokenizer);

            try
            {
                var tokenSet = new PreviousTokenSet();
                tokenSet.Add(scanner.CurrentToken);
                while (scanner.MoveNext())
                {
                    if (scanner.CurrentToken is OperatorToken operatorToken)
                    {
                        HandleOperator(operatorToken, inputBytes, scanner, tokenSet, dictionaries);
                    }

                    tokenSet.Add(scanner.CurrentToken);
                }
            }
            finally
            {
                scanner.DeregisterCustomTokenizer(arrayTokenizer);
                scanner.DeregisterCustomTokenizer(nameTokenizer);
            }

            var encoding    = GetEncoding(dictionaries);
            var matrix      = GetFontMatrix(dictionaries);
            var boundingBox = GetBoundingBox(dictionaries);

            return new Type1Font(name, encoding, matrix, boundingBox);
        }
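For comparison, a hypothetical call to this older overload; it takes no length arguments and, via SkipEncryptedContent in Code Example #2, skips the encrypted portion rather than parsing charstrings. Variable names are illustrative and reuse the fontProgram bytes from the earlier sketch.

        // Illustrative only; the parser types are internal to PdfPig.
        var parser = new Type1FontParser();                 // parameterless constructor assumed for this older version
        Type1Font font = parser.Parse(new ByteArrayInputBytes(fontProgram));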