shithub: freetype+ttf2subf

ref: ebe85f59c9540a37ff72ffe8aba9c08b9d8b28ef
parent: b3de817acb3b22a17c3e6e10dea68031ad5e16bb
author: David Turner <[email protected]>
date: Fri May 11 10:25:57 EDT 2001

* include/freetype/fttrigon.h, src/base/fttrigon.c, src/base/ftbase.c,
    src/base/Jamfile, src/base/rules.mk: added trigonometric functions
    to the core API (using CORDIC algorithms; see the illustrative sketch
    after this entry).

    * builds/toplevel.mk, builds/newline, builds/detect.mk: fixed problems
    with Make on Windows 2000, as well as problems when "make distclean" is
    invoked on a non-Unix platform when there is no "config.mk" in the
    current directory.

    * builds/freetype.mk: fixed a problem with object deletions under
    Dos/Windows/OS/2 systems.

    * src/tools: added a new directory to hold tools and test programs;
    moved docmaker.py and glnames.py to it.

    * src/tools/docmaker.py: improved the script to add the current date
    to the footer of each web page (useful to distinguish between versions).

    * Jamfile: fixed an incorrect HDRMACRO argument.

    * TODO: removed the cubic arc bbox computation note, since it has been
    fixed recently.

    * include/freetype/t1tables.h, include/freetype/config/ftoption.h:
    formatting.
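
An aside on the CORDIC-based trigonometry mentioned above: the new fttrigon
API exposes fixed-point trigonometric helpers (functions such as FT_Sin,
FT_Cos, FT_Atan2, and FT_Vector_Rotate, operating on 16.16 values) that are
computed with CORDIC, i.e. with shift-and-add vector rotations instead of
multiplications or large lookup tables.  The C program below is a minimal,
self-contained sketch of CORDIC rotation mode in 16.16 fixed point; it only
illustrates the technique and does not reproduce the actual fttrigon.c
implementation (angle units, tables, and argument reduction differ there).

    #include <stdio.h>
    #include <math.h>                    /* libm used only for reference values */

    typedef long  Fixed;                 /* 16.16 fixed-point                    */

    /* arctan( 2^-i ) for i = 0..15, expressed in 16.16 radians                  */
    static const Fixed  arctan_tab[16] =
    {
      51472, 30386, 16055, 8150, 4091, 2047, 1024, 512,
        256,   128,    64,   32,   16,    8,    4,   2
    };

    /* CORDIC gain compensation:  K = prod( 1/sqrt( 1 + 2^-2i ) ) ~ 0.607253     */
    #define CORDIC_K  ( (Fixed)( 0.6072529350088813 * 65536.0 ) )

    /* compute cos and sin of `angle' (16.16 radians, |angle| <= pi/2) with      */
    /* CORDIC rotation mode -- only shifts, adds, and table look-ups             */
    static void
    cordic_cos_sin( Fixed   angle,
                    Fixed*  pcos,
                    Fixed*  psin )
    {
      Fixed  x = CORDIC_K;              /* pre-scaled so the CORDIC gain cancels */
      Fixed  y = 0;
      Fixed  z = angle;
      int    i;

      for ( i = 0; i < 16; i++ )
      {
        Fixed  dx = y >> i;
        Fixed  dy = x >> i;

        if ( z >= 0 )
        {
          x -= dx;  y += dy;  z -= arctan_tab[i];
        }
        else
        {
          x += dx;  y -= dy;  z += arctan_tab[i];
        }
      }

      *pcos = x;                         /* ~ cos( angle ) in 16.16              */
      *psin = y;                         /* ~ sin( angle ) in 16.16              */
    }

    int
    main( void )
    {
      Fixed  angle = (Fixed)( 0.5 * 65536.0 );    /* 0.5 radian                  */
      Fixed  c, s;

      cordic_cos_sin( angle, &c, &s );
      printf( "cos(0.5) ~ %.6f  (libm %.6f)\n", c / 65536.0, cos( 0.5 ) );
      printf( "sin(0.5) ~ %.6f  (libm %.6f)\n", s / 65536.0, sin( 0.5 ) );
      return 0;
    }

Each CORDIC step contributes roughly one extra bit of precision, so sixteen
steps are plenty for 16.16 values, and folding the gain factor K into the
initial vector removes the need for any final multiplication.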

--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,36 @@
+2001-05-11  David Turner  <[email protected]>
+
+    * include/freetype/fttrigon.h, src/base/fttrigon.c, src/base/ftbase.c,
+    src/base/Jamfile, src/base/rules.mk: adding trigonometric functions
+    to the core API (using Cordic algorithms).
+
+    * builds/top_level.mk, builds/newline, builds/detect.mk: fixed problems
+    with Make on Windows 2000, as well as problems when "make distclean" is
+    invoked on a non-Unix platform when there is no "config.mk" in the
+    current directory..
+
+    * builds/freetype.mk: fixed a problem with object deletions under
+    Dos/Windows/OS/2 systems
+
+    * src/tools: added new directory to hold tools and test programs
+    moved docmaker.py, glnames.py to it..
+
+    * src/tools/docmaker.py: improved the script to add the current date
+    at the footer of each web page (useful to distinguish between versions)
+
+    * Jamfile: fixed incorrect HDRMACRO argument.
+
+    * TODO: removed the cubic arc bbox computation note, since it has been
+    fixed recently..
+
+    * include/freetype/t1tables.h, include/freetype/config/ftoption.h:
+    formatting
+
+2001-05-10  David Turner  <[email protected]>
+
+        * src/base/ftobjs.c (FT_Open_Face): fixed a small memory leaked
+        which happened when trying to open 0-size font files !!
+
 2001-05-09  Werner Lemberg  <[email protected]>
 
 	* include/freetype/internal/ftcalc.h: Move declaration of
@@ -21,8 +54,8 @@
 
 2001-04-27  David Turner  <[email protected]>
 
-	* src/base/ftbbox.c (BBox_Cubic_Check): Fixed the coefficient     
-	normalization algorithm (invalid final bit position, and invalid 
+	* src/base/ftbbox.c (BBox_Cubic_Check): Fixed the coefficient
+	normalization algorithm (invalid final bit position, and invalid
 	shift computation).
 
 2001-04-26  Werner Lemberg  <[email protected]>
@@ -66,7 +99,7 @@
 	types on platforms where Autoconf is not available).  Also removed
 	FTCALC_USE_LONG_LONG and replaced it with
 	FT_CONFIG_OPTION_FORCE_INT64.
-	
+
 	* builds/win32/freetype.dsp: Updated the Visual C++ project file.
 	Doesn't create a DLL yet.
 
--- a/builds/detect.mk
+++ b/builds/detect.mk
@@ -131,22 +131,25 @@
 	@echo ""
 	@$(COPY) $(CONFIG_RULES) $(CONFIG_MK)
 
+
+# special case for Dos, Windows, OS/2, where echo "" doesn't work correctly !!
+#
 dos_setup:
-	@echo 
+	@type builds\newline
 	@echo $(PROJECT_TITLE) build system -- automatic system detection
-	@echo 
+	@type builds\newline
 	@echo The following settings are used:
-	@echo 
-	@echo ��platform���������������������$(PLATFORM)
-	@echo ��compiler���������������������$(CC)
-	@echo ��configuration directory������$(BUILD)
-	@echo ��configuration rules����������$(CONFIG_RULES)
-	@echo 
+	@type builds\newline
+	@echo   platform���������������������$(PLATFORM)
+	@echo   compiler���������������������$(CC)
+	@echo   configuration directory������$(BUILD)
+	@echo   configuration rules����������$(CONFIG_RULES)
+	@type builds\newline
 	@echo If this does not correspond to your system or settings please remove the file
 	@echo '$(CONFIG_MK)' from this directory then read the INSTALL file for help.
-	@echo 
+	@type builds\newline
 	@echo Otherwise, simply type 'make' again to build the library.
-	@echo 
+	@type builds\newline
 	@$(COPY) $(subst /,\,$(CONFIG_RULES) $(CONFIG_MK)) > nul
 
 # EOF
--- a/builds/freetype.mk
+++ b/builds/freetype.mk
@@ -273,13 +273,14 @@
 # The Dos command shell does not support very long list of arguments, so
 # we are stuck with wildcards.
 #
+# don't break the command lines with, this prevents the "del" command from
+# working correctly on Win9x
+#
 clean_project_dos:
-	-$(DELETE) $(subst $(SEP),$(HOSTSEP),$(OBJ_))*.$O \
-                   $(CLEAN) $(NO_OUTPUT)
+	-$(DELETE) $(subst $(SEP),$(HOSTSEP),$(OBJ_))*.$O $(CLEAN) $(NO_OUTPUT)
 
 distclean_project_dos: clean_project_dos
-	-$(DELETE) $(subst $(SEP),$(HOSTSEP),$(PROJECT_LIBRARY)) \
-                   $(DISTCLEAN) $(NO_OUTPUT)
+	-$(DELETE) $(subst $(SEP),$(HOSTSEP),$(PROJECT_LIBRARY)) $(DISTCLEAN) $(NO_OUTPUT)
 
 
 .PHONY: remove_config_mk
--- a/builds/link_std.mk
+++ b/builds/link_std.mk
@@ -32,7 +32,7 @@
   #
   $(PROJECT_LIBRARY): $(OBJECTS_LIST)
     ifdef CLEAN_LIBRARY
-	  -$(CLEAN_LIBRARY) $(NO_OUTPUT)
+	-$(CLEAN_LIBRARY) xx $(NO_OUTPUT)
     endif
 	$(LINK_LIBRARY)
 
--- /dev/null
+++ b/builds/newline
@@ -1,0 +1,1 @@
+
--- a/builds/toplevel.mk
+++ b/builds/toplevel.mk
@@ -97,7 +97,12 @@
   # GNU make.  Similarly, `nul' is created if e.g. `make setup win32' has
   # been erroneously used.
   #
-  distclean:
+  # note: this test is duplicated in "builds/toplevel.mk"
+  is_unix := $(strip $(wildcard /sbin/init) $(wildcard /hurd/auth))
+  ifneq ($(is_unix),)
+
+
+    distclean:
 	  $(RM) builds/unix/config.cache
 	  $(RM) builds/unix/config.log
 	  $(RM) builds/unix/config.status
@@ -104,6 +109,8 @@
 	  $(RM) builds/unix/unix-def.mk
 	  $(RM) builds/unix/unix-cc.mk
 	  $(RM) nul
+
+  endif # test is_unix
 
   # IMPORTANT:
   #
--- a/builds/unix/detect.mk
+++ b/builds/unix/detect.mk
@@ -16,6 +16,7 @@
 
 ifeq ($(PLATFORM),ansi)
 
+  # note: this test is duplicated in "builds/toplevel.mk"
   is_unix := $(strip $(wildcard /sbin/init) $(wildcard /hurd/auth))
   ifneq ($(is_unix),)
 
--- a/docs/docmaker.py
+++ /dev/null
@@ -1,1654 +1,0 @@
-#!/usr/bin/env python
-#
-#  DocMaker 0.1 (c) 2000-2001 David Turner <[email protected]>
-#
-#  DocMaker is a very simple program used to generate the API Reference
-#  of programs by extracting comments from source files, and generating
-#  the equivalent HTML documentation.
-#
-#  DocMaker is very similar to other tools like Doxygen, with the
-#  following differences:
-#
-#    - It is written in Python (so it is slow, but easy to maintain and
-#      improve).
-#
-#    - The comment syntax used by DocMaker is simpler and makes for
-#      clearer comments.
-#
-#  Of course, it doesn't have all the goodies of most similar tools,
-#  (e.g. C++ class hierarchies), but hey, it is only 2000 lines of
-#  Python.
-#
-#  DocMaker is mainly used to generate the API references of several
-#  FreeType packages.
-#
-#   - David
-#
-
-import fileinput, sys, os, string, glob, getopt
-
-# The Project's title.  This can be overridden from the command line with
-# the options "-t" or "--title".
-#
-project_title = "Project"
-
-# The project's filename prefix.  This can be set from the command line with
-# the options "-p" or "--prefix"
-#
-project_prefix = ""
-
-# The project's documentation output directory.  This can be set from the
-# command line with the options "-o" or "--output".
-#
-output_dir = None
-
-
-# The following defines the HTML header used by all generated pages.
-#
-html_header_1 = """\
-<html>
-<header>
-<title>"""
-
-html_header_2= """ API Reference</title>
-<basefont face="Verdana,Geneva,Arial,Helvetica">
-<style content="text/css">
-  P { text-align=justify }
-  H1 { text-align=center }
-  LI { text-align=justify }
-</style>
-</header>
-<body text=#000000
-      bgcolor=#FFFFFF
-      link=#0000EF
-      vlink=#51188E
-      alink=#FF0000>
-<center><h1>"""
-
-html_header_3=""" API Reference</h1></center>
-"""
-
-# This is recomputed later when the project title changes.
-#
-html_header = html_header_1 + project_title + html_header_2 + project_title + html_header_3
-
-
-# The HTML footer used by all generated pages.
-#
-html_footer = """\
-</body>
-</html>"""
-
-# The header and footer used for each section.
-#
-section_title_header = "<center><h1>"
-section_title_footer = "</h1></center>"
-
-# The header and footer used for code segments.
-#
-code_header = "<font color=blue><pre>"
-code_footer = "</pre></font>"
-
-# Paragraph header and footer.
-#
-para_header = "<p>"
-para_footer = "</p>"
-
-# Block header and footer.
-#
-block_header = "<center><table width=75%><tr><td>"
-block_footer = "</td></tr></table><hr width=75%></center>"
-
-# Description header/footer.
-#
-description_header = "<center><table width=87%><tr><td>"
-description_footer = "</td></tr></table></center><br>"
-
-# Marker header/inter/footer combination.
-#
-marker_header = "<center><table width=87% cellpadding=5><tr bgcolor=#EEEEFF><td><em><b>"
-marker_inter  = "</b></em></td></tr><tr><td>"
-marker_footer = "</td></tr></table></center>"
-
-# Source code extracts header/footer.
-#
-source_header = "<center><table width=87%><tr bgcolor=#D6E8FF width=100%><td><pre>"
-source_footer = "</pre></table></center><br>"
-
-# Chapter header/inter/footer.
-#
-chapter_header = "<center><table width=75%><tr><td><h2>"
-chapter_inter  = "</h2><ul>"
-chapter_footer = "</ul></td></tr></table></center>"
-
-current_section = None
-
-
-# This function is used to sort the index.  It is a simple lexicographical
-# sort, except that it places capital letters before lowercase ones.
-#
-def index_sort( s1, s2 ):
-    if not s1:
-        return -1
-
-    if not s2:
-        return 1
-
-    l1 = len( s1 )
-    l2 = len( s2 )
-    m1 = string.lower( s1 )
-    m2 = string.lower( s2 )
-
-    for i in range( l1 ):
-        if i >= l2 or m1[i] > m2[i]:
-            return 1
-
-        if m1[i] < m2[i]:
-            return -1
-
-        if s1[i] < s2[i]:
-            return -1
-
-        if s1[i] > s2[i]:
-            return 1
-
-    if l2 > l1:
-        return -1
-
-    return 0
-
-
-# Sort input_list, placing the elements of order_list in front.
-#
-def sort_order_list( input_list, order_list ):
-    new_list = order_list[:]
-    for id in input_list:
-        if not id in order_list:
-            new_list.append( id )
-    return new_list
-
-
-# Translate a single line of source to HTML.  This will convert
-# a "<" into "&lt.", ">" into "&gt.", etc.
-#
-def html_format( line ):
-    result = string.replace( line, "<", "&lt." )
-    result = string.replace( line, ">", "&gt." )
-    result = string.replace( line, "&", "&amp." )
-    return result
-
-
-# Open the standard output to a given project documentation file.  Use
-# "output_dir" to determine the filename location if necessary and save the
-# old stdout in a tuple that is returned by this function.
-#
-def open_output( filename ):
-    global output_dir
-
-    if output_dir and output_dir != "":
-        filename = output_dir + os.sep + filename
-
-    old_stdout = sys.stdout
-    new_file   = open( filename, "w" )
-    sys.stdout = new_file
-
-    return ( new_file, old_stdout )
-
-
-# Close the output that was returned by "close_output".
-#
-def close_output( output ):
-    output[0].close()
-    sys.stdout = output[1]
-
-
-# Check output directory.
-#
-def check_output( ):
-    global output_dir
-    if output_dir:
-        if output_dir != "":
-            if not os.path.isdir( output_dir ):
-                sys.stderr.write( "argument" + " '" + output_dir + "' " +
-                                  "is not a valid directory" )
-                sys.exit( 2 )
-        else:
-            output_dir = None
-
-
-# The FreeType 2 reference is extracted from the source files.  These
-# contain various comment blocks that follow one of the following formats:
-#
-#  /**************************
-#   *
-#   *  FORMAT1
-#   *
-#   *
-#   *
-#   *
-#   *************************/
-#
-#  /**************************/
-#  /*                        */
-#  /*  FORMAT2               */
-#  /*                        */
-#  /*                        */
-#  /*                        */
-#  /*                        */
-#
-#  /**************************/
-#  /*                        */
-#  /*  FORMAT3               */
-#  /*                        */
-#  /*                        */
-#  /*                        */
-#  /*                        */
-#  /**************************/
-#
-# Each block contains a list of markers; each one can be followed by
-# some arbitrary text or a list of fields.  Here an example:
-#
-#    <Struct>
-#       MyStruct
-#
-#    <Description>
-#       this structure holds some data
-#
-#    <Fields>
-#       x :: horizontal coordinate
-#       y :: vertical coordinate
-#
-#
-# This example defines three markers: 'Struct', 'Description' & 'Fields'.
-# The first two markers contain arbitrary text, while the last one contains
-# a list of fields.
-#
-# Each field is simply of the format:  WORD :: TEXT...
-#
-# Note that typically each comment block is followed by some source code
-# declaration that may need to be kept in the reference.
-#
-# Note that markers can alternatively be written as "@MARKER:" instead of
-# "<MARKER>".  All marker identifiers are converted to lower case during
-# parsing in order to simply sorting.
-#
-# We associate with each block the following source lines that do not begin
-# with a comment.  For example, the following:
-#
-#   /**********************************
-#    *
-#    * <mytag>  blabla
-#    *
-#    */
-#
-#   bla_bla_bla
-#   bilip_bilip
-#
-#   /* - this comment acts as a separator - */
-#
-#   blo_blo_blo
-#
-#
-# will only keep the first two lines of sources with
-# the "blabla" block.
-#
-# However, the comment will be kept, with following source lines if it
-# contains a starting '#' or '@' as in:
-#
-#   /*@.....*/
-#   /*#.....*/
-#   /* @.....*/
-#   /* #.....*/
-#
-
-
-
-#############################################################################
-#
-# The DocCode class is used to store source code lines.
-#
-#   'self.lines' contains a set of source code lines that will be dumped as
-#   HTML in a <PRE> tag.
-#
-#   The object is filled line by line by the parser; it strips the leading
-#   "margin" space from each input line before storing it in 'self.lines'.
-#
-class DocCode:
-
-    def __init__( self, margin = 0 ):
-        self.lines  = []
-        self.margin = margin
-
-
-    def add( self, line ):
-        # remove margin whitespace
-        #
-        if string.strip( line[: self.margin] ) == "":
-            line = line[self.margin :]
-        self.lines.append( line )
-
-
-    def dump( self ):
-        for line in self.lines:
-            print "--" + line
-        print ""
-
-
-    def get_identifier( self ):
-        # this function should never be called
-        #
-        return "UNKNOWN_CODE_IDENTIFIER!"
-
-
-    def dump_html( self, identifiers = None ):
-        # clean the last empty lines
-        #
-        l = len( self.lines ) - 1
-        while l > 0 and string.strip( self.lines[l - 1] ) == "":
-            l = l - 1
-
-        # The code footer should be directly appended to the last code
-        # line to avoid an additional blank line.
-        #
-        sys.stdout.write( code_header )
-        for line in self.lines[0 : l+1]:
-            sys.stdout.write( '\n' + html_format(line) )
-        sys.stdout.write( code_footer )
-
-
-
-#############################################################################
-#
-# The DocParagraph is used to store text paragraphs.
-# 'self.words' is simply a list of words for the paragraph.
-#
-# The paragraph is filled line by line by the parser.
-#
-class DocParagraph:
-
-    def __init__( self ):
-        self.words = []
-
-
-    def add( self, line ):
-        # Get rid of unwanted spaces in the paragraph.
-        #
-        # The following two lines are the same as
-        #
-        #   self.words.extend( string.split( line ) )
-        #
-        # but older Python versions don't have the `extend' attribute.
-        #
-        last = len( self.words )
-        self.words[last : last] = string.split( line )
-
-
-    # This function is used to retrieve the first word of a given
-    # paragraph.
-    #
-    def get_identifier( self ):
-        if self.words:
-            return self.words[0]
-
-        # should never happen
-        #
-        return "UNKNOWN_PARA_IDENTIFIER!"
-
-
-    def get_words( self ):
-        return self.words[:]
-
-
-    def dump( self, identifiers = None ):
-        max_width = 50
-        cursor    = 0
-        line      = ""
-        extra     = None
-        alphanum  = string.lowercase + string.uppercase + string.digits + '_'
-
-        for word in self.words:
-            # process cross references if needed
-            #
-            if identifiers and word and word[0] == '@':
-                word = word[1 :]
-
-                # we need to find non-alphanumeric characters
-                #
-                l = len( word )
-                i = 0
-                while i < l and word[i] in alphanum:
-                    i = i + 1
-
-                if i < l:
-                    extra = word[i :]
-                    word  = word[0 : i]
-
-                block = identifiers.get( word )
-                if block:
-                    word = '<a href="' + block.html_address() + '">' + word + '</a>'
-                else:
-                    word = '?' + word
-
-            if cursor + len( word ) + 1 > max_width:
-                print html_format( line )
-                cursor = 0
-                line   = ""
-
-            line = line + word
-            if not extra:
-                line = line + " "
-
-            cursor = cursor + len( word ) + 1
-
-
-            # Handle trailing periods, commas, etc. at the end of cross
-            # references.
-            #
-            if extra:
-                if cursor + len( extra ) + 1 > max_width:
-                    print html_format( line )
-                    cursor = 0
-                    line   = ""
-
-                line   = line + extra + " "
-                cursor = cursor + len( extra ) + 1
-                extra  = None
-
-        if cursor > 0:
-            print html_format(line)
-
-        # print "�" # for debugging only
-
-
-    def dump_string( self ):
-        s     = ""
-        space = ""
-        for word in self.words:
-            s     = s + space + word
-            space = " "
-
-        return s
-
-
-    def dump_html( self, identifiers = None ):
-        print para_header
-        self.dump( identifiers )
-        print para_footer
-
-
-
-#############################################################################
-#
-# DocContent is used to store the content of a given marker.
-#
-# The "self.items" list contains (field,elements) records, where "field"
-# corresponds to a given structure field or function parameter (indicated
-# by a "::"), or NULL for a normal section of text/code.
-#
-# Hence, the following example:
-#
-#   <MyMarker>
-#      This is an example of what can be put in a content section,
-#
-#      A second line of example text.
-#
-#      x :: A simple test field, with some contents.
-#      y :: Even before, this field has some code contents.
-#           {
-#             y = x+2;
-#           }
-#
-# should be stored as
-#
-#     [ ( None, [ DocParagraph, DocParagraph] ),
-#       ( "x",  [ DocParagraph ] ),
-#       ( "y",  [ DocParagraph, DocCode ] ) ]
-#
-# in 'self.items'.
-#
-# The DocContent object is entirely built at creation time; you must pass a
-# list of input text lines in the "lines_list" parameter.
-#
-class DocContent:
-
-    def __init__( self, lines_list ):
-        self.items  = []
-        code_mode   = 0
-        code_margin = 0
-        text        = []
-        paragraph   = None   # represents the current DocParagraph
-        code        = None   # represents the current DocCode
-
-        elements    = []     # the list of elements for the current field;
-                             # contains DocParagraph or DocCode objects
-
-        field       = None   # the current field
-
-        for aline in lines_list:
-            if code_mode == 0:
-                line   = string.lstrip( aline )
-                l      = len( line )
-                margin = len( aline ) - l
-
-                # if the line is empty, this is the end of the current
-                # paragraph
-                #
-                if l == 0 or line == '{':
-                    if paragraph:
-                        elements.append( paragraph )
-                        paragraph = None
-
-                    if line == "":
-                        continue
-
-                    code_mode   = 1
-                    code_margin = margin
-                    code        = None
-                    continue
-
-                words = string.split( line )
-
-                # test for a field delimiter on the start of the line, i.e.
-                # the token `::'
-                #
-                if len( words ) >= 2 and words[1] == "::":
-                    # start a new field - complete current paragraph if any
-                    #
-                    if paragraph:
-                        elements.append( paragraph )
-                        paragraph = None
-
-                    # append previous "field" to self.items
-                    #
-                    self.items.append( ( field, elements ) )
-
-                    # start new field and elements list
-                    #
-                    field    = words[0]
-                    elements = []
-                    words    = words[2 :]
-
-                # append remaining words to current paragraph
-                #
-                if len( words ) > 0:
-                    line = string.join( words )
-                    if not paragraph:
-                        paragraph = DocParagraph()
-                    paragraph.add( line )
-
-            else:
-                # we are in code mode...
-                #
-                line = aline
-
-                # the code block ends with a line that has a single '}' on
-                # it that is located at the same column that the opening
-                # accolade...
-                #
-                if line == " " * code_margin + '}':
-                    if code:
-                        elements.append( code )
-                        code = None
-
-                    code_mode   = 0
-                    code_margin = 0
-
-                # otherwise, add the line to the current paragraph
-                #
-                else:
-                    if not code:
-                        code = DocCode()
-                    code.add( line )
-
-        if paragraph:
-            elements.append( paragraph )
-
-        if code:
-            elements.append( code )
-
-        self.items.append( ( field, elements ) )
-
-
-    def get_identifier( self ):
-        if self.items:
-            item = self.items[0]
-            for element in item[1]:
-                return element.get_identifier()
-
-        # should never happen
-        #
-        return "UNKNOWN_CONTENT_IDENTIFIER!"
-
-
-    def get_title( self ):
-        if self.items:
-            item = self.items[0]
-            for element in item[1]:
-                return element.dump_string()
-
-        # should never happen
-        #
-        return "UNKNOWN_CONTENT_TITLE!"
-
-
-    def dump( self ):
-        for item in self.items:
-            field = item[0]
-            if field:
-                print "<field " + field + ">"
-
-            for element in item[1]:
-                element.dump()
-
-            if field:
-                print "</field>"
-
-
-    def dump_html( self, identifiers = None ):
-        n        = len( self.items )
-        in_table = 0
-
-        for i in range( n ):
-            item  = self.items[i]
-            field = item[0]
-
-            if not field:
-                if in_table:
-                    print "</td></tr></table>"
-                    in_table = 0
-
-                for element in item[1]:
-                    element.dump_html( identifiers )
-
-            else:
-                if not in_table:
-                    print "<table cellpadding=4><tr valign=top><td>"
-                    in_table = 1
-                else:
-                    print "</td></tr><tr valign=top><td>"
-
-                print "<b>" + field + "</b></td><td>"
-
-                for element in item[1]:
-                    element.dump_html( identifiers )
-
-        if in_table:
-            print "</td></tr></table>"
-
-
-    def dump_html_in_table( self, identifiers = None ):
-        n        = len( self.items )
-        in_table = 0
-
-        for i in range( n ):
-            item  = self.items[i]
-            field = item[0]
-
-            if not field:
-                if item[1]:
-                    print "<tr><td colspan=2>"
-                    for element in item[1]:
-                        element.dump_html( identifiers )
-                    print "</td></tr>"
-
-            else:
-                print "<tr><td><b>" + field + "</b></td><td>"
-
-                for element in item[1]:
-                    element.dump_html( identifiers )
-
-                print "</td></tr>"
-
-
-
-#############################################################################
-#
-# The DocBlock class is used to store a given comment block.  It contains
-# a list of markers, as well as a list of contents for each marker.
-#
-#   "self.items" is a list of (marker, contents) elements, where 'marker' is
-#   a lowercase marker string, and 'contents' is a DocContent object.
-#
-#   "self.source" is simply a list of text lines taken from the uncommented
-#   source itself.
-#
-#   Finally, "self.name" is a simple identifier used to uniquely identify
-#   the block. It is taken from the first word of the first paragraph of the
-#   first marker of a given block, i.e:
-#
-#      <Type> Goo
-#      <Description> Bla bla bla
-#
-#   will have a name of "Goo"
-#
-class DocBlock:
-
-    def __init__( self, block_line_list = [], source_line_list = [] ):
-        self.items    = []               # current ( marker, contents ) list
-        self.section  = None             # section this block belongs to
-        self.filename = "unknown"        # filename defining this block
-        self.lineno   = 0                # line number in filename
-
-        marker        = None             # current marker
-        content       = []               # current content lines list
-        alphanum      = string.letters + string.digits + "_"
-        self.name     = None
-
-        for line in block_line_list:
-            line2  = string.lstrip( line )
-            l      = len( line2 )
-            margin = len( line ) - l
-
-            if l > 3:
-                ender = None
-                if line2[0] == '<':
-                    ender = '>'
-                elif line2[0] == '@':
-                    ender = ':'
-
-                if ender:
-                    i = 1
-                    while i < l and line2[i] in alphanum:
-                        i = i + 1
-                    if i < l and line2[i] == ender:
-                        if marker and content:
-                            self.add( marker, content )
-                        marker  = line2[1 : i]
-                        content = []
-                        line2   = string.lstrip( line2[i+1 :] )
-                        l       = len( line2 )
-                        line    = " " * margin + line2
-
-            content.append( line )
-
-        if marker and content:
-            self.add( marker, content )
-
-        self.source = []
-        if self.items:
-            self.source = source_line_list
-
-        # now retrieve block name when possible
-        #
-        if self.items:
-            first     = self.items[0]
-            self.name = first[1].get_identifier()
-
-
-    # This function adds a new element to 'self.items'.
-    #
-    #   'marker' is a marker string, or None.
-    #   'lines'  is a list of text lines used to compute a list of
-    #            DocContent objects.
-    #
-    def add( self, marker, lines ):
-        # remove the first and last empty lines from the content list
-        #
-        l = len( lines )
-        if l > 0:
-            i = 0
-            while l > 0 and string.strip( lines[l - 1] ) == "":
-                l = l - 1
-            while i < l and string.strip( lines[i] ) == "":
-                i = i + 1
-            lines = lines[i : l]
-            l     = len( lines )
-
-        # add a new marker only if its marker and its content list
-        # are not empty
-        #
-        if l > 0 and marker:
-            content = DocContent( lines )
-            self.items.append( ( string.lower( marker ), content ) )
-
-
-    def find_content( self, marker ):
-        for item in self.items:
-            if ( item[0] == marker ):
-                return item[1]
-        return None
-
-
-    def html_address( self ):
-        section = self.section
-        if section and section.filename:
-            return section.filename + '#' + self.name
-
-        return ""  # this block is not in a section?
-
-
-    def location( self ):
-        return self.filename + ':' + str( self.lineno )
-
-
-    def print_warning( self, message ):
-        sys.stderr.write( "WARNING:" +
-                          self.location() + ": " + message + '\n' )
-
-
-    def print_error( self, message ):
-        sys.stderr.write( "ERROR:" +
-                          self.location() + ": " + message + '\n' )
-        sys.exit()
-
-
-    def dump( self ):
-        for i in range( len( self.items ) ):
-            print "[" + self.items[i][0] + "]"
-            content = self.items[i][1]
-            content.dump()
-
-
-    def dump_html( self, identifiers = None ):
-        types      = ['type', 'struct', 'functype', 'function',
-                      'constant', 'enum', 'macro', 'structure', 'also']
-
-        parameters = ['input', 'inout', 'output', 'return']
-
-        if not self.items:
-            return
-
-        # start of a block
-        #
-        print block_header
-
-        # place html anchor if needed
-        #
-        if self.name:
-            print '<a name="' + self.name + '">'
-            print "<h4>" + self.name + "</h4>"
-            print "</a>"
-
-        # print source code
-        #
-        if not self.source:
-            print block_footer
-            return
-
-        lines = self.source
-        l     = len( lines ) - 1
-        while l >= 0 and string.strip( lines[l] ) == "":
-            l = l - 1
-        print source_header
-        print ""
-        for line in lines[0 : l+1]:
-            print line
-        print source_footer
-
-        in_table = 0
-
-        # dump each (marker,content) element
-        #
-        for element in self.items:
-            marker  = element[0]
-            content = element[1]
-
-            if marker == "description":
-                print description_header
-                content.dump_html( identifiers )
-                print description_footer
-
-            elif not ( marker in types ):
-                sys.stdout.write( marker_header )
-                sys.stdout.write( marker )
-                sys.stdout.write( marker_inter + '\n' )
-                content.dump_html( identifiers )
-                print marker_footer
-
-        print ""
-
-        print block_footer
-
-
-
-#############################################################################
-#
-# The DocSection class is used to store a given documentation section.
-#
-# Each section is made of an identifier, an abstract and a description.
-#
-# For example, look at:
-#
-#   <Section> Basic_Data_Types
-#
-#   <Title> FreeType 2 Basic Data Types
-#
-#   <Abstract>
-#      Definitions of basic FreeType data types
-#
-#   <Description>
-#      FreeType defines several basic data types for all its
-#      operations...
-#
-class DocSection:
-
-    def __init__( self, block ):
-        self.block       = block
-        self.name        = string.lower( block.name )
-        self.abstract    = block.find_content( "abstract" )
-        self.description = block.find_content( "description" )
-        self.elements    = {}
-        self.list        = []
-        self.filename    = self.name + ".html"
-        self.chapter     = None
-
-        # sys.stderr.write( "new section '" + self.name + "'" )
-
-
-    def add_element( self, block ):
-        # check that we don't have a duplicate element in this
-        # section
-        #
-        if self.elements.has_key( block.name ):
-            block.print_error( "duplicate element definition for " +
-                               "'" + block.name + "' " +
-                               "in section " +
-                               "'" + self.name + "'\n" +
-                               "previous definition in " +
-                               "'" + self.elements[block.name].location() + "'" )
-
-        self.elements[block.name] = block
-        self.list.append( block )
-
-
-    def print_warning( self, message ):
-        self.block.print_warning( message )
-
-
-    def print_error( self, message ):
-        self.block.print_error( message )
-
-
-    def dump_html( self, identifiers = None ):
-        """make an HTML page from a given DocSection"""
-
-        # print HTML header
-        #
-        print html_header
-
-        # print title
-        #
-        print section_title_header
-        print self.title
-        print section_title_footer
-
-        # print description
-        #
-        print block_header
-        self.description.dump_html( identifiers )
-        print block_footer
-
-        # print elements
-        #
-        for element in self.list:
-            element.dump_html( identifiers )
-
-        print html_footer
-
-
-class DocSectionList:
-
-    def __init__( self ):
-        self.sections        = {}    # map section names to section objects
-        self.list            = []    # list of sections (in creation order)
-        self.current_section = None  # current section
-        self.identifiers     = {}    # map identifiers to blocks
-
-
-    def append_section( self, block ):
-        name     = string.lower( block.name )
-        abstract = block.find_content( "abstract" )
-
-        if self.sections.has_key( name ):
-            # There is already a section with this name in our list.  We
-            # will try to complete it.
-            #
-            section = self.sections[name]
-            if section.abstract:
-                # This section already has an abstract defined; simply check
-                # that the new section doesn't provide a new one.
-                #
-                if abstract:
-                    section.block.print_error(
-                      "duplicate section definition for " +
-                      "'" + name + "'\n" +
-                      "previous definition in " +
-                      "'" + section.block.location() + "'\n" +
-                      "second definition in " +
-                      "'" + block.location() + "'" )
-            else:
-                # The old section didn't contain an abstract; we are now
-                # going to replace it.
-                #
-                section.abstract    = abstract
-                section.description = block.find_content( "description" )
-                section.block       = block
-
-        else:
-            # a new section
-            #
-            section = DocSection( block )
-            self.sections[name] = section
-            self.list.append( section )
-
-        self.current_section = section
-
-
-    def append_block( self, block ):
-        if block.name:
-            section = block.find_content( "section" )
-            if section:
-                self.append_section( block )
-
-            elif self.current_section:
-                self.current_section.add_element( block )
-                block.section                = self.current_section
-                self.identifiers[block.name] = block
-
-
-    def prepare_files( self, file_prefix = None ):
-        # prepare the section list, by computing section filenames and the
-        # index
-        #
-        if file_prefix:
-            prefix = file_prefix + "-"
-        else:
-            prefix = ""
-
-        # compute section names
-        #
-        for section in self.sections.values():
-            title_content     = section.block.find_content( "title" )
-            if title_content:
-                section.title = title_content.get_title()
-            else:
-                section.title = "UNKNOWN_SECTION_TITLE!"
-
-
-        # sort section elements according to the <order> marker if available
-        #
-        for section in self.sections.values():
-            order = section.block.find_content( "order" )
-            if order:
-                # sys.stderr.write( "<order> found at "
-                #                   + section.block.location() + '\n' )
-                order_list = []
-                for item in order.items:
-                    for element in item[1]:
-                        words = None
-                        try:
-                            words = element.get_words()
-                        except:
-                            section.block.print_warning(
-                              "invalid content in <order> marker\n" )
-                        if words:
-                            for word in words:
-                                block = self.identifiers.get( word )
-                                if block:
-                                    if block.section == section:
-                                        order_list.append( block )
-                                    else:
-                                        section.block.print_warning(
-                                          "invalid reference to " +
-                                          "'" + word + "' " +
-                                          "defined in other section" )
-                                else:
-                                    section.block.print_warning(
-                                      "invalid reference to " +
-                                      "'" + word + "'" )
-
-                # now sort the list of blocks according to the order list
-                #
-                new_list = order_list[:]
-                for block in section.list:
-                    if not block in order_list:
-                        new_list.append( block )
-
-                section.list = new_list
-
-        # compute section filenames
-        #
-        for section in self.sections.values():
-            section.filename = prefix + section.name + ".html"
-
-        self.toc_filename   = prefix + "toc.html"
-        self.index_filename = prefix + "index.html"
-
-        # compute the sorted list of identifiers for the index
-        #
-        self.index = self.identifiers.keys()
-        self.index.sort( index_sort )
-
-
-    def dump_html_sections( self ):
-        for section in self.sections.values():
-            if section.filename:
-                output = open_output( section.filename )
-
-                section.dump_html( self.identifiers )
-
-                close_output( output )
-
-
-    def dump_html_index( self ):
-        output = open_output( self.index_filename )
-
-        num_columns = 3
-        total       = len( self.index )
-        line        = 0
-
-        print html_header
-        print "<center><h1>General Index</h1></center>"
-        print "<center><table cellpadding=5><tr valign=top><td>"
-
-        for ident in self.index:
-            block = self.identifiers[ident]
-            if block:
-                sys.stdout.write( '<a href="' + block.html_address() + '">' )
-                sys.stdout.write( block.name )
-                sys.stdout.write( '</a><br>' + '\n' )
-
-                if line * num_columns >= total:
-                    print "</td><td>"
-                    line = 0
-                else:
-                    line = line + 1
-            else:
-                sys.stderr.write( "identifier '" + ident +
-                                  "' has no definition" + '\n' )
-
-        print "</tr></table></center>"
-        print html_footer
-
-        close_output( output )
-
-
-
-# Filter a given list of DocBlocks.  Returns a new list of DocBlock objects
-# that only contains element whose "type" (i.e. first marker) is in the
-# "types" parameter.
-#
-class DocChapter:
-
-    def __init__( self, block ):
-        self.sections_names = []    # ordered list of section names
-        self.sections       = []    # ordered list of DocSection objects
-                                    # for this chapter
-        self.block          = block
-
-        # look for chapter title
-        content = block.find_content( "title" )
-        if content:
-            self.title = content.get_title()
-        else:
-            self.title = "UNKNOWN CHAPTER TITLE"
-
-        # look for section list
-        content = block.find_content( "sections" )
-        if not content:
-            block.print_error( "chapter has no <sections> content" )
-
-        # compute list of section names
-        slist = []
-        for item in content.items:
-            for element in item[1]:
-                try:
-                    words        = element.get_words()
-                    l            = len( slist )
-                    slist[l : l] = words
-                except:
-                    block.print_warning(
-                      "invalid content in <sections> marker" )
-
-        self.section_names = slist
-
-
-class DocDocument:
-
-    def __init__( self ):
-        self.section_list  = DocSectionList()   # section list object
-        self.chapters      = []                 # list of chapters
-        self.lost_sections = []                 # list of sections with
-                                                # no chapter
-
-    def append_block( self, block ):
-        if block.name:
-            content = block.find_content( "chapter" )
-            if content:
-                # a chapter definition -- add it to our list
-                #
-                chapter = DocChapter( block )
-                self.chapters.append( chapter )
-            else:
-                self.section_list.append_block( block )
-
-
-    def prepare_chapters( self ):
-        # check section names
-        #
-        for chapter in self.chapters:
-            slist = []
-            for name in chapter.section_names:
-                 section = self.section_list.sections.get( name )
-                 if not section:
-                     chapter.block.print_warning(
-                       "invalid reference to unknown section '" + name + "'" )
-                 else:
-                     section.chapter = chapter
-                     slist.append( section )
-
-            chapter.sections = slist
-
-        for section in self.section_list.list:
-            if not section.chapter:
-                section.block.print_warning(
-                  "section '" + section.name + "' is not in any chapter" )
-                self.lost_sections.append( section )
-
-
-    def prepare_files( self, file_prefix = None ):
-        self.section_list.prepare_files( file_prefix )
-        self.prepare_chapters()
-
-
-    def dump_toc_html( self ):
-        # dump an html table of contents
-        #
-        output = open_output( self.section_list.toc_filename )
-
-        print html_header
-
-        print "<center><h1>Table of Contents</h1></center>"
-
-        for chapter in self.chapters:
-            print chapter_header + chapter.title + chapter_inter
-
-            print "<table cellpadding=5>"
-            for section in chapter.sections:
-                if section.abstract:
-                    print "<tr valign=top><td>"
-                    sys.stdout.write( '<a href="' + section.filename + '">' )
-                    sys.stdout.write( section.title )
-                    sys.stdout.write( "</a></td><td>" + '\n' )
-                    section.abstract.dump_html( self.section_list.identifiers )
-                    print "</td></tr>"
-
-            print "</table>"
-
-            print chapter_footer
-
-        # list lost sections
-        #
-        if self.lost_sections:
-            print chapter_header + "OTHER SECTIONS:" + chapter_inter
-
-            print "<table cellpadding=5>"
-            for section in self.lost_sections:
-                if section.abstract:
-                    print "<tr valign=top><td>"
-                    sys.stdout.write( '<a href="' + section.filename + '">' )
-                    sys.stdout.write( section.title )
-                    sys.stdout.write( "</a></td><td>" + '\n' )
-                    section.abstract.dump_html( self.section_list.identifiers )
-                    print "</td></tr>"
-
-            print "</table>"
-
-            print chapter_footer
-
-        # index
-        #
-        print chapter_header + '<a href="' + self.section_list.index_filename + '">Index</a>' + chapter_footer
-
-        print html_footer
-
-        close_output( output )
-
-
-    def dump_index_html( self ):
-        self.section_list.dump_html_index()
-
-
-    def dump_sections_html( self ):
-        self.section_list.dump_html_sections()
-
-
-def filter_blocks_by_type( block_list, types ):
-    new_list = []
-    for block in block_list:
-        if block.items:
-            element = block.items[0]
-            marker  = element[0]
-            if marker in types:
-                new_list.append( block )
-
-    return new_list
-
-
-def filter_section_blocks( block ):
-    return block.section != None
-
-
-# Perform a lexicographical comparison of two DocBlock objects.  Returns -1,
-# 0 or 1.
-#
-def block_lexicographical_compare( b1, b2 ):
-    if not b1.name:
-        return -1
-    if not b2.name:
-        return 1
-
-    id1 = string.lower( b1.name )
-    id2 = string.lower( b2.name )
-
-    if id1 < id2:
-        return -1
-    elif id1 == id2:
-        return 0
-    else:
-        return 1
-
-
-# Dump a list block as a single HTML page.
-#
-def dump_html_1( block_list ):
-    print html_header
-
-    for block in block_list:
-        block.dump_html()
-
-    print html_footer
-
-
-def file_exists( pathname ):
-    result = 1
-    try:
-        file = open( pathname, "r" )
-        file.close()
-    except:
-        result = None
-
-    return result
-
-
-def add_new_block( list, filename, lineno, block_lines, source_lines ):
-    """add a new block to the list"""
-    block          = DocBlock( block_lines, source_lines )
-    block.filename = filename
-    block.lineno   = lineno
-    list.append( block )
-
-
-def make_block_list( args = None ):
-    """parse a file and extract comments blocks from it"""
-
-    file_list = []
-    # sys.stderr.write( repr( sys.argv[1 :] ) + '\n' )
-
-    if not args:
-        args = sys.argv[1 :]
-
-    for pathname in args:
-        if string.find( pathname, '*' ) >= 0:
-            newpath = glob.glob( pathname )
-            newpath.sort()  # sort files -- this is important because
-                            # of the order of files
-        else:
-            newpath = [pathname]
-
-        last = len( file_list )
-        file_list[last : last] = newpath
-
-    if len( file_list ) == 0:
-        file_list = None
-    else:
-        # now filter the file list to remove non-existing ones
-        file_list = filter( file_exists, file_list )
-
-    list   = []
-    block  = []
-    format = 0
-    lineno = 0
-
-    # We use "format" to store the state of our parser:
-    #
-    #  0 - wait for beginning of comment
-    #  1 - parse comment format 1
-    #  2 - parse comment format 2
-    #
-    #  4 - wait for beginning of source (or comment?)
-    #  5 - process source
-    #
-    comment = []
-    source  = []
-    state   = 0
-
-    fileinput.close()
-    for line in fileinput.input( file_list ):
-        l = len( line )
-        if l > 0 and line[l - 1] == '\012':
-            line = line[0 : l-1]
-
-        # stripped version of the line
-        #
-        line2 = string.strip( line )
-        l     = len( line2 )
-
-        # if this line begins with a comment and we are processing some
-        # source, exit to state 0
-        #
-        # unless we encounter something like:
-        #
-        #    /*@.....
-        #    /*#.....
-        #
-        #    /* @.....
-        #    /* #.....
-        #
-        if format >= 4 and l > 2 and line2[0 : 2] == '/*':
-            if l < 4 or ( line2[2] != '@' and line2[2 : 4] != ' @' and
-                          line2[2] != '#' and line2[2 : 4] != ' #'):
-                add_new_block( list, fileinput.filename(),
-                               lineno, block, source )
-                format = 0
-
-        if format == 0:  #### wait for beginning of comment ####
-            if l > 3 and line2[0 : 3] == '/**':
-                i = 3
-                while i < l and line2[i] == '*':
-                    i = i + 1
-
-                if i == l:
-                    # this is '/**' followed by any number of '*', the
-                    # beginning of a Format 1 block
-                    #
-                    block  = []
-                    source = []
-                    format = 1
-                    lineno = fileinput.filelineno()
-
-                elif i == l - 1 and line2[i] == '/':
-                    # this is '/**' followed by any number of '*', followed
-                    # by a '/', i.e. the beginning of a Format 2 or 3 block
-                    #
-                    block  = []
-                    source = []
-                    format = 2
-                    lineno = fileinput.filelineno()
-
-        ##############################################################
-        #
-        # FORMAT 1
-        #
-        elif format == 1:
-
-            # If the line doesn't begin with a "*", something went wrong,
-            # and we must exit, and forget the current block.
-            #
-            if l == 0 or line2[0] != '*':
-                block  = []
-                format = 0
-
-            # Otherwise, we test for an end of block, which is an arbitrary
-            # number of '*', followed by '/'.
-            #
-            else:
-                i = 1
-                while i < l and line2[i] == '*':
-                    i = i + 1
-
-                # test for the end of the block
-                #
-                if i < l and line2[i] == '/':
-                    if block != []:
-                        format = 4
-                    else:
-                        format = 0
-                else:
-                    # otherwise simply append line to current block
-                    #
-                    block.append( line2[i :] )
-
-                continue
-
-        ##############################################################
-        #
-        # FORMAT 2
-        #
-        elif format == 2:
-
-            # If the line doesn't begin with '/*' and end with '*/', this is
-            # the end of the format 2 format.
-            #
-            if l < 4 or line2[: 2] != '/*' or line2[-2 :] != '*/':
-                if block != []:
-                    format = 4
-                else:
-                    format = 0
-            else:
-                # remove the start and end comment delimiters, then
-                # right-strip the line
-                #
-                line2 = string.rstrip( line2[2 : -2] )
-
-                # check for end of a format2 block, i.e. a run of '*'
-                #
-                if string.count( line2, '*' ) == l - 4:
-                    if block != []:
-                        format = 4
-                    else:
-                        format = 0
-                else:
-                    # otherwise, add the line to the current block
-                    #
-                    block.append( line2 )
-
-                continue
-
-        if format >= 4:  #### source processing ####
-            if l > 0:
-                format = 5
-
-            if format == 5:
-                source.append( line )
-
-    if format >= 4:
-        add_new_block( list, fileinput.filename(), lineno, block, source )
-
-    return list
-
-
-
-# This function is only used for debugging
-#
-def dump_block_list( list ):
-    """dump a comment block list"""
-    for block in list:
-        print "----------------------------------------"
-        for line in block[0]:
-            print line
-        for line in block[1]:
-            print line
-
-    print "---------the end-----------------------"
-
-
-def usage():
-    print "\nDocMaker 0.1 Usage information\n"
-    print "  docmaker [options] file1 [ file2 ... ]\n"
-    print "using the following options:\n"
-    print "  -h : print this page"
-    print "  -t : set project title, as in '-t \"My Project\"'"
-    print "  -o : set output directory, as in '-o mydir'"
-    print "  -p : set documentation prefix, as in '-p ft2'"
-    print ""
-    print "  --title  : same as -t, as in '--title=\"My Project\"'"
-    print "  --output : same as -o, as in '--output=mydir'"
-    print "  --prefix : same as -p, as in '--prefix=ft2'"
-
-
-def main( argv ):
-    """main program loop"""
-
-    global output_dir, project_title, project_prefix
-    global html_header, html_header1, html_header2, html_header3
-
-    try:
-        opts, args = getopt.getopt( sys.argv[1:],
-                                    "ht:o:p:",
-                                    [ "help", "title=", "output=", "prefix=" ] )
-
-    except getopt.GetoptError:
-        usage()
-        sys.exit( 2 )
-
-    if args == []:
-        usage()
-        sys.exit( 1 )
-
-    # process options
-    #
-    project_title  = "Project"
-    project_prefix = None
-    output_dir     = None
-
-    for opt in opts:
-        if opt[0] in ( "-h", "--help" ):
-            usage()
-            sys.exit( 0 )
-
-        if opt[0] in ( "-t", "--title" ):
-            project_title = opt[1]
-
-        if opt[0] in ( "-o", "--output" ):
-            output_dir = opt[1]
-
-        if opt[0] in ( "-p", "--prefix" ):
-            project_prefix = opt[1]
-
-    html_header = html_header_1 + project_title + html_header_2 + project_title + html_header_3
-    check_output( )
-
-    # we begin by simply building a list of DocBlock elements
-    #
-    list = make_block_list( args )
-
-    # now, sort the blocks into sections
-    #
-    document = DocDocument()
-    for block in list:
-        document.append_block( block )
-
-    document.prepare_files( project_prefix )
-
-    document.dump_toc_html()
-    document.dump_sections_html()
-    document.dump_index_html()
-
-# if called from the command line
-#
-if __name__ == '__main__':
-    main( sys.argv )
-
-
-# eof
--- a/docs/glnames.py
+++ /dev/null
@@ -1,1706 +1,0 @@
-#!/usr/bin/env python
-#
-
-#
-# FreeType 2 glyph name builder
-#
-
-
-# Copyright 1996-2000 by
-# David Turner, Robert Wilhelm, and Werner Lemberg.
-#
-# This file is part of the FreeType project, and may only be used, modified,
-# and distributed under the terms of the FreeType project license,
-# LICENSE.TXT.  By continuing to use, modify, or distribute this file you
-# indicate that you have read the license and understand and accept it
-# fully.
-
-
-"""\
-
-usage: %s <output-file>
-
-  This very simple python script is used to generate the glyph names
-  tables defined in the PSNames module.
-
-  Its single argument is the name of the header file to be created.
-"""
-
-
-import sys, string
-
-
-# This table is used to name the glyph according to the Macintosh
-# specification.  It is used by the TrueType Postscript names table
-#
-mac_standard_names = \
-[
-  # 0
-  ".notdef", ".null", "CR", "space", "exclam",
-  "quotedbl", "numbersign", "dollar", "percent", "ampersand",
-
-  # 10
-  "quotesingle", "parenleft", "parenright", "asterisk", "plus",
-  "comma", "hyphen", "period", "slash", "zero",
-
-  # 20
-  "one", "two", "three", "four", "five",
-  "six", "seven", "eight", "nine", "colon",
-
-  # 30
-  "semicolon", "less", "equal", "greater", "question",
-  "at", "A", "B", "C", "D",
-
-  # 40
-  "E", "F", "G", "H", "I",
-  "J", "K", "L", "M", "N",
-
-  # 50
-  "O", "P", "Q", "R", "S",
-  "T", "U", "V", "W", "X",
-
-  # 60
-  "Y", "Z", "bracketleft", "backslash", "bracketright",
-  "asciicircum", "underscore", "grave", "a", "b",
-
-  # 70
-  "c", "d", "e", "f", "g",
-  "h", "i", "j", "k", "l",
-
-  # 80
-  "m", "n", "o", "p", "q",
-  "r", "s", "t", "u", "v",
-
-  # 90
-  "w", "x", "y", "z", "braceleft",
-  "bar", "braceright", "asciitilde", "Adieresis", "Aring",
-
-  # 100
-  "Ccedilla", "Eacute", "Ntilde", "Odieresis", "Udieresis",
-  "aacute", "agrave", "acircumflex", "adieresis", "atilde",
-
-  # 110
-  "aring", "ccedilla", "eacute", "egrave", "ecircumflex",
-  "edieresis", "iacute", "igrave", "icircumflex", "idieresis",
-
-  # 120
-  "ntilde", "oacute", "ograve", "ocircumflex", "odieresis",
-  "otilde", "uacute", "ugrave", "ucircumflex", "udieresis",
-
-  # 130
-  "dagger", "degree", "cent", "sterling", "section",
-  "bullet", "paragraph", "germandbls", "registered", "copyright",
-
-  # 140
-  "trademark", "acute", "dieresis", "notequal", "AE",
-  "Oslash", "infinity", "plusminus", "lessequal", "greaterequal",
-
-  # 150
-  "yen", "mu", "partialdiff", "summation", "product",
-  "pi", "integral", "ordfeminine", "ordmasculine", "Omega",
-
-  # 160
-  "ae", "oslash", "questiondown", "exclamdown", "logicalnot",
-  "radical", "florin", "approxequal", "Delta", "guillemotleft",
-
-  # 170
-  "guillemotright", "ellipsis", "nbspace", "Agrave", "Atilde",
-  "Otilde", "OE", "oe", "endash", "emdash",
-
-  # 180
-  "quotedblleft", "quotedblright", "quoteleft", "quoteright", "divide",
-  "lozenge", "ydieresis", "Ydieresis", "fraction", "currency",
-
-  # 190
-  "guilsinglleft", "guilsinglright", "fi", "fl", "daggerdbl",
-  "periodcentered", "quotesinglbase", "quotedblbase", "perthousand",
-    "Acircumflex",
-
-  # 200
-  "Ecircumflex", "Aacute", "Edieresis", "Egrave", "Iacute",
-  "Icircumflex", "Idieresis", "Igrave", "Oacute", "Ocircumflex",
-
-  # 210
-  "apple", "Ograve", "Uacute", "Ucircumflex", "Ugrave",
-  "dotlessi", "circumflex", "tilde", "macron", "breve",
-
-  # 220
-  "dotaccent", "ring", "cedilla", "hungarumlaut", "ogonek",
-  "caron", "Lslash", "lslash", "Scaron", "scaron",
-
-  # 230
-  "Zcaron", "zcaron", "brokenbar", "Eth", "eth",
-  "Yacute", "yacute", "Thorn", "thorn", "minus",
-
-  # 240
-  "multiply", "onesuperior", "twosuperior", "threesuperior", "onehalf",
-  "onequarter", "threequarters", "franc", "Gbreve", "gbreve",
-
-  # 250
-  "Idot", "Scedilla", "scedilla", "Cacute", "cacute",
-  "Ccaron", "ccaron", "dmacron"
-]
-
-
-t1_standard_strings = \
-[
-  # 0
-  ".notdef", "space", "exclam", "quotedbl", "numbersign",
-  "dollar", "percent", "ampersand", "quoteright", "parenleft",
-
-  # 10
-  "parenright", "asterisk", "plus", "comma", "hyphen",
-  "period", "slash", "zero", "one", "two",
-
-  # 20
-  "three", "four", "five", "six", "seven",
-  "eight", "nine", "colon", "semicolon", "less",
-
-  # 30
-  "equal", "greater", "question", "at", "A",
-  "B", "C", "D", "E", "F",
-
-  # 40
-  "G", "H", "I", "J", "K",
-  "L", "M", "N", "O", "P",
-
-  # 50
-  "Q", "R", "S", "T", "U",
-  "V", "W", "X", "Y", "Z",
-
-  # 60
-  "bracketleft", "backslash", "bracketright", "asciicircum", "underscore",
-  "quoteleft", "a", "b", "c", "d",
-
-  # 70
-  "e", "f", "g", "h", "i",
-  "j", "k", "l", "m", "n",
-
-  # 80
-  "o", "p", "q", "r", "s",
-  "t", "u", "v", "w", "x",
-
-  # 90
-  "y", "z", "braceleft", "bar", "braceright",
-  "asciitilde", "exclamdown", "cent", "sterling", "fraction",
-
-  # 100
-  "yen", "florin", "section", "currency", "quotesingle",
-  "quotedblleft", "guillemotleft", "guilsinglleft", "guilsinglright", "fi",
-
-  # 110
-  "fl", "endash", "dagger", "daggerdbl", "periodcenter",
-  "paragraph", "bullet", "quotesinglbase", "quotedblbase", "quotedblright",
-
-  # 120
-  "guillemotright", "ellipsis", "perthousand", "questiondown", "grave",
-  "acute", "circumflex", "tilde", "macron", "breve",
-
-  # 130
-  "dotaccent", "dieresis", "ring", "cedilla", "hungarumlaut",
-  "ogonek", "caron", "emdash", "AE", "ordfeminine",
-
-  # 140
-  "Lslash", "Oslash", "OE", "ordmasculine", "ae",
-  "dotlessi", "Islash", "oslash", "oe", "germandbls",
-
-  # 150
-  "onesuperior", "logicalnot", "mu", "trademark", "Eth",
-  "onehalf", "plusminus", "Thorn", "onequarter", "divide",
-
-  # 160
-  "brokenbar", "degree", "thorn", "threequarters", "twosuperior",
-  "registered", "minus", "eth", "multiply", "threesuperior",
-
-  # 170
-  "copyright", "Aacute", "Acircumflex", "Adieresis", "Agrave",
-  "Aring", "Atilde", "Ccedilla", "Eacute", "Ecircumflex",
-
-  # 180
-  "Edieresis", "Egrave", "Iacute", "Icircumflex", "Idieresis",
-  "Igrave", "Ntilde", "Oacute", "Ocircumflex", "Odieresis",
-
-  # 190
-  "Ograve", "Otilde", "Scaron", "Uacute", "Ucircumflex",
-  "Udieresis", "Ugrave", "Yacute", "Ydieresis", "Zcaron",
-
-  # 200
-  "aacute", "acircumflex", "adieresis", "agrave", "aring",
-  "atilde", "ccedilla", "eacute", "ecircumflex", "edieresis",
-
-  # 210
-  "egrave", "iacute", "icircumflex", "idieresis", "igrave",
-  "ntilde", "oacute", "ocircumflex", "odieresis", "ograve",
-
-  # 220
-  "otilde", "scaron", "uacute", "ucircumflex", "udieresis",
-  "ugrave", "yacute", "ydieresis", "zcaron", "exclamsmall",
-
-  # 230
-  "Hungarumlautsmall", "dollaroldstyle", "dollarsuperior", "ampersandsmall",
-    "Acutesmall",
-  "parenleftsuperior", "parenrightsuperior", "twodotenleader",
-    "onedotenleader", "zerooldstyle",
-
-  # 240
-  "oneoldstyle", "twooldstyle", "threeoldstyle", "fouroldstyle",
-    "fiveoldstyle",
-  "sixoldstyle", "sevenoldstyle", "eightoldstyle", "nineoldstyle",
-    "commasuperior",
-
-  # 250
-  "threequartersemdash", "periodsuperior", "questionsmall", "asuperior",
-    "bsuperior",
-  "centsuperior", "dsuperior", "esuperior", "isuperior", "lsuperior",
-
-  # 260
-  "msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior",
-  "tsuperior", "ff", "ffi", "ffl", "parenleftinferior",
-
-  # 270
-  "parenrightinferior", "Circumflexsmall", "hyphensuperior", "Gravesmall",
-    "Asmall",
-  "Bsmall", "Csmall", "Dsmall", "Esmall", "Fsmall",
-
-  # 280
-  "Gsmall", "Hsmall", "Ismall", "Jsmall", "Ksmall",
-  "Lsmall", "Msmall", "Nsmall", "Osmall", "Psmall",
-
-  # 290
-  "Qsmall", "Rsmall", "Ssmall", "Tsmall", "Usmall",
-  "Vsmall", "Wsmall", "Xsmall", "Ysmall", "Zsmall",
-
-  # 300
-  "colonmonetary", "onefitted", "rupiah", "Tildesmall", "exclamdownsmall",
-  "centoldstyle", "Lslashsmall", "Scaronsmall", "Zcaronsmall",
-    "Dieresissmall",
-
-  # 310
-  "Brevesmall", "Caronsmall", "Dotaccentsmall", "Macronsmall", "figuredash",
-  "hypheninferior", "Ogoneksmall", "Ringsmall", "Cedillasmall",
-    "questiondownsmall",
-
-  # 320
-  "oneeighth", "threeeighths", "fiveeighths", "seveneighths", "onethird",
-  "twothirds", "zerosuperior", "foursuperior", "fivesuperior",
-    "sixsuperior",
-
-  # 330
-  "sevensuperior", "eightsuperior", "ninesuperior", "zeroinferior",
-    "oneinferior",
-  "twoinferior", "threeinferior", "fourinferior", "fiveinferior",
-    "sixinferior",
-
-  # 340
-  "seveninferior", "eightinferior", "nineinferior", "centinferior",
-    "dollarinferior",
-  "periodinferior", "commainferior", "Agravesmall", "Aacutesmall",
-    "Acircumflexsmall",
-
-  # 350
-  "Atildesmall", "Adieresissmall", "Aringsmall", "AEsmall", "Ccedillasmall",
-  "Egravesmall", "Eacutesmall", "Ecircumflexsmall", "Edieresissmall",
-    "Igravesmall",
-
-  # 360
-  "Iacutesmall", "Icircumflexsmall", "Idieresissmall", "Ethsmall",
-    "Ntildesmall",
-  "Ogravesmall", "Oacutesmall", "Ocircumflexsmall", "Otildesmall",
-    "Odieresissmall",
-
-  # 370
-  "OEsmall", "Oslashsmall", "Ugravesmall", "Uacautesmall",
-    "Ucircumflexsmall",
-  "Udieresissmall", "Yacutesmall", "Thornsmall", "Ydieresissmall",
-    "001.000",
-
-  # 380
-  "001.001", "001.002", "001.003", "Black", "Bold",
-  "Book", "Light", "Medium", "Regular", "Roman",
-
-  # 390
-  "Semibold"
-]
-
-
-t1_standard_encoding = \
-[
-    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
-    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
-    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
-    0,   0,   1,   2,   3,   4,   5,   6,   7,   8,
-    9,  10,  11,  12,  13,  14,  15,  16,  17,  18,
-
-   19,  20,  21,  22,  23,  24,  25,  26,  27,  28,
-   29,  30,  31,  32,  33,  34,  35,  36,  37,  38,
-   39,  40,  41,  42,  43,  44,  45,  46,  47,  48,
-   49,  50,  51,  52,  53,  54,  55,  56,  57,  58,
-   59,  60,  61,  62,  63,  64,  65,  66,  67,  68,
-
-   69,  70,  71,  72,  73,  74,  75,  76,  77,  78,
-   79,  80,  81,  82,  83,  84,  85,  86,  87,  88,
-   89,  90,  91,  92,  93,  94,  95,   0,   0,   0,
-    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
-    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
-
-    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
-    0,  96,  97,  98,  99, 100, 101, 102, 103, 104,
-  105, 106, 107, 108, 109, 110,   0, 111, 112, 113,
-  114,   0, 115, 116, 117, 118, 119, 120, 121, 122,
-    0, 123,   0, 124, 125, 126, 127, 128, 129, 130,
-
-  131,   0, 132, 133,   0, 134, 135, 136, 137,   0,
-    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
-    0,   0,   0,   0,   0, 138,   0, 139,   0,   0,
-    0,   0, 140, 141, 142, 143,   0,   0,   0,   0,
-    0, 144,   0,   0,   0, 145,   0,   0, 146, 147,
-
-  148, 149,   0,   0,   0,   0
-]
-
-
-t1_expert_encoding = \
-[
-    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
-    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
-    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
-    0,   0,   1, 229, 230,   0, 231, 232, 233, 234,
-  235, 236, 237, 238,  13,  14,  15,  99, 239, 240,
-
-  241, 242, 243, 244, 245, 246, 247, 248,  27,  28,
-  249, 250, 251, 252,   0, 253, 254, 255, 256, 257,
-    0,   0,   0, 258,   0,   0, 259, 260, 261, 262,
-    0,   0, 263, 264, 265,   0, 266, 109, 110, 267,
-  268, 269,   0, 270, 271, 272, 273, 274, 275, 276,
-
-  277, 278, 279, 280, 281, 282, 283, 284, 285, 286,
-  287, 288, 289, 290, 291, 292, 293, 294, 295, 296,
-  297, 298, 299, 300, 301, 302, 303,   0,   0,   0,
-    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
-    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
-
-    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
-    0, 304, 305, 306,   0,   0, 307, 308, 309, 310,
-  311,   0, 312,   0,   0, 313,   0,   0, 314, 315,
-    0,   0, 316, 317, 318,   0,   0,   0, 158, 155,
-  163, 319, 320, 321, 322, 323, 324, 325,   0,   0,
-
-  326, 150, 164, 169, 327, 328, 329, 330, 331, 332,
-  333, 334, 335, 336, 337, 338, 339, 340, 341, 342,
-  343, 344, 345, 346, 347, 348, 349, 350, 351, 352,
-  353, 354, 355, 356, 357, 358, 359, 360, 361, 362,
-  363, 364, 365, 366, 367, 368, 369, 370, 371, 372,
-
-  373, 374, 375, 376, 377, 378
-]
-
-
-# This data has been taken literally from the file `glyphlist.txt',
-# version 1.2, 22 Oct 1998.  It is available from
-#
-#   http://partners.adobe.com/asn/developer/typeforum/unicodegn.html
-#
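-# Each record is a semicolon-separated line of the form
-#
-#   <Unicode value>;<glyph name>;<character description>[;Duplicate]
-#
-# e.g. `0041;A;LATIN CAPITAL LETTER A'; the optional fourth field flags
-# entries whose glyph name also appears elsewhere in the list.
-#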
-adobe_glyph_list = """\
-0041;A;LATIN CAPITAL LETTER A
-00C6;AE;LATIN CAPITAL LETTER AE
-01FC;AEacute;LATIN CAPITAL LETTER AE WITH ACUTE
-F7E6;AEsmall;LATIN SMALL CAPITAL LETTER AE
-00C1;Aacute;LATIN CAPITAL LETTER A WITH ACUTE
-F7E1;Aacutesmall;LATIN SMALL CAPITAL LETTER A WITH ACUTE
-0102;Abreve;LATIN CAPITAL LETTER A WITH BREVE
-00C2;Acircumflex;LATIN CAPITAL LETTER A WITH CIRCUMFLEX
-F7E2;Acircumflexsmall;LATIN SMALL CAPITAL LETTER A WITH CIRCUMFLEX
-F6C9;Acute;CAPITAL ACUTE ACCENT
-F7B4;Acutesmall;SMALL CAPITAL ACUTE ACCENT
-00C4;Adieresis;LATIN CAPITAL LETTER A WITH DIAERESIS
-F7E4;Adieresissmall;LATIN SMALL CAPITAL LETTER A WITH DIAERESIS
-00C0;Agrave;LATIN CAPITAL LETTER A WITH GRAVE
-F7E0;Agravesmall;LATIN SMALL CAPITAL LETTER A WITH GRAVE
-0391;Alpha;GREEK CAPITAL LETTER ALPHA
-0386;Alphatonos;GREEK CAPITAL LETTER ALPHA WITH TONOS
-0100;Amacron;LATIN CAPITAL LETTER A WITH MACRON
-0104;Aogonek;LATIN CAPITAL LETTER A WITH OGONEK
-00C5;Aring;LATIN CAPITAL LETTER A WITH RING ABOVE
-01FA;Aringacute;LATIN CAPITAL LETTER A WITH RING ABOVE AND ACUTE
-F7E5;Aringsmall;LATIN SMALL CAPITAL LETTER A WITH RING ABOVE
-F761;Asmall;LATIN SMALL CAPITAL LETTER A
-00C3;Atilde;LATIN CAPITAL LETTER A WITH TILDE
-F7E3;Atildesmall;LATIN SMALL CAPITAL LETTER A WITH TILDE
-0042;B;LATIN CAPITAL LETTER B
-0392;Beta;GREEK CAPITAL LETTER BETA
-F6F4;Brevesmall;SMALL CAPITAL BREVE
-F762;Bsmall;LATIN SMALL CAPITAL LETTER B
-0043;C;LATIN CAPITAL LETTER C
-0106;Cacute;LATIN CAPITAL LETTER C WITH ACUTE
-F6CA;Caron;CAPITAL CARON
-F6F5;Caronsmall;SMALL CAPITAL CARON
-010C;Ccaron;LATIN CAPITAL LETTER C WITH CARON
-00C7;Ccedilla;LATIN CAPITAL LETTER C WITH CEDILLA
-F7E7;Ccedillasmall;LATIN SMALL CAPITAL LETTER C WITH CEDILLA
-0108;Ccircumflex;LATIN CAPITAL LETTER C WITH CIRCUMFLEX
-010A;Cdotaccent;LATIN CAPITAL LETTER C WITH DOT ABOVE
-F7B8;Cedillasmall;SMALL CAPITAL CEDILLA
-03A7;Chi;GREEK CAPITAL LETTER CHI
-F6F6;Circumflexsmall;SMALL CAPITAL MODIFIER LETTER CIRCUMFLEX ACCENT
-F763;Csmall;LATIN SMALL CAPITAL LETTER C
-0044;D;LATIN CAPITAL LETTER D
-010E;Dcaron;LATIN CAPITAL LETTER D WITH CARON
-0110;Dcroat;LATIN CAPITAL LETTER D WITH STROKE
-2206;Delta;INCREMENT
-0394;Delta;GREEK CAPITAL LETTER DELTA;Duplicate
-F6CB;Dieresis;CAPITAL DIAERESIS
-F6CC;DieresisAcute;CAPITAL DIAERESIS ACUTE ACCENT
-F6CD;DieresisGrave;CAPITAL DIAERESIS GRAVE ACCENT
-F7A8;Dieresissmall;SMALL CAPITAL DIAERESIS
-F6F7;Dotaccentsmall;SMALL CAPITAL DOT ABOVE
-F764;Dsmall;LATIN SMALL CAPITAL LETTER D
-0045;E;LATIN CAPITAL LETTER E
-00C9;Eacute;LATIN CAPITAL LETTER E WITH ACUTE
-F7E9;Eacutesmall;LATIN SMALL CAPITAL LETTER E WITH ACUTE
-0114;Ebreve;LATIN CAPITAL LETTER E WITH BREVE
-011A;Ecaron;LATIN CAPITAL LETTER E WITH CARON
-00CA;Ecircumflex;LATIN CAPITAL LETTER E WITH CIRCUMFLEX
-F7EA;Ecircumflexsmall;LATIN SMALL CAPITAL LETTER E WITH CIRCUMFLEX
-00CB;Edieresis;LATIN CAPITAL LETTER E WITH DIAERESIS
-F7EB;Edieresissmall;LATIN SMALL CAPITAL LETTER E WITH DIAERESIS
-0116;Edotaccent;LATIN CAPITAL LETTER E WITH DOT ABOVE
-00C8;Egrave;LATIN CAPITAL LETTER E WITH GRAVE
-F7E8;Egravesmall;LATIN SMALL CAPITAL LETTER E WITH GRAVE
-0112;Emacron;LATIN CAPITAL LETTER E WITH MACRON
-014A;Eng;LATIN CAPITAL LETTER ENG
-0118;Eogonek;LATIN CAPITAL LETTER E WITH OGONEK
-0395;Epsilon;GREEK CAPITAL LETTER EPSILON
-0388;Epsilontonos;GREEK CAPITAL LETTER EPSILON WITH TONOS
-F765;Esmall;LATIN SMALL CAPITAL LETTER E
-0397;Eta;GREEK CAPITAL LETTER ETA
-0389;Etatonos;GREEK CAPITAL LETTER ETA WITH TONOS
-00D0;Eth;LATIN CAPITAL LETTER ETH
-F7F0;Ethsmall;LATIN SMALL CAPITAL LETTER ETH
-20AC;Euro;EURO SIGN
-0046;F;LATIN CAPITAL LETTER F
-F766;Fsmall;LATIN SMALL CAPITAL LETTER F
-0047;G;LATIN CAPITAL LETTER G
-0393;Gamma;GREEK CAPITAL LETTER GAMMA
-011E;Gbreve;LATIN CAPITAL LETTER G WITH BREVE
-01E6;Gcaron;LATIN CAPITAL LETTER G WITH CARON
-011C;Gcircumflex;LATIN CAPITAL LETTER G WITH CIRCUMFLEX
-0122;Gcommaaccent;LATIN CAPITAL LETTER G WITH CEDILLA
-0120;Gdotaccent;LATIN CAPITAL LETTER G WITH DOT ABOVE
-F6CE;Grave;CAPITAL GRAVE ACCENT
-F760;Gravesmall;SMALL CAPITAL GRAVE ACCENT
-F767;Gsmall;LATIN SMALL CAPITAL LETTER G
-0048;H;LATIN CAPITAL LETTER H
-25CF;H18533;BLACK CIRCLE
-25AA;H18543;BLACK SMALL SQUARE
-25AB;H18551;WHITE SMALL SQUARE
-25A1;H22073;WHITE SQUARE
-0126;Hbar;LATIN CAPITAL LETTER H WITH STROKE
-0124;Hcircumflex;LATIN CAPITAL LETTER H WITH CIRCUMFLEX
-F768;Hsmall;LATIN SMALL CAPITAL LETTER H
-F6CF;Hungarumlaut;CAPITAL DOUBLE ACUTE ACCENT
-F6F8;Hungarumlautsmall;SMALL CAPITAL DOUBLE ACUTE ACCENT
-0049;I;LATIN CAPITAL LETTER I
-0132;IJ;LATIN CAPITAL LIGATURE IJ
-00CD;Iacute;LATIN CAPITAL LETTER I WITH ACUTE
-F7ED;Iacutesmall;LATIN SMALL CAPITAL LETTER I WITH ACUTE
-012C;Ibreve;LATIN CAPITAL LETTER I WITH BREVE
-00CE;Icircumflex;LATIN CAPITAL LETTER I WITH CIRCUMFLEX
-F7EE;Icircumflexsmall;LATIN SMALL CAPITAL LETTER I WITH CIRCUMFLEX
-00CF;Idieresis;LATIN CAPITAL LETTER I WITH DIAERESIS
-F7EF;Idieresissmall;LATIN SMALL CAPITAL LETTER I WITH DIAERESIS
-0130;Idotaccent;LATIN CAPITAL LETTER I WITH DOT ABOVE
-2111;Ifraktur;BLACK-LETTER CAPITAL I
-00CC;Igrave;LATIN CAPITAL LETTER I WITH GRAVE
-F7EC;Igravesmall;LATIN SMALL CAPITAL LETTER I WITH GRAVE
-012A;Imacron;LATIN CAPITAL LETTER I WITH MACRON
-012E;Iogonek;LATIN CAPITAL LETTER I WITH OGONEK
-0399;Iota;GREEK CAPITAL LETTER IOTA
-03AA;Iotadieresis;GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
-038A;Iotatonos;GREEK CAPITAL LETTER IOTA WITH TONOS
-F769;Ismall;LATIN SMALL CAPITAL LETTER I
-0128;Itilde;LATIN CAPITAL LETTER I WITH TILDE
-004A;J;LATIN CAPITAL LETTER J
-0134;Jcircumflex;LATIN CAPITAL LETTER J WITH CIRCUMFLEX
-F76A;Jsmall;LATIN SMALL CAPITAL LETTER J
-004B;K;LATIN CAPITAL LETTER K
-039A;Kappa;GREEK CAPITAL LETTER KAPPA
-0136;Kcommaaccent;LATIN CAPITAL LETTER K WITH CEDILLA
-F76B;Ksmall;LATIN SMALL CAPITAL LETTER K
-004C;L;LATIN CAPITAL LETTER L
-F6BF;LL;LATIN CAPITAL LETTER LL
-0139;Lacute;LATIN CAPITAL LETTER L WITH ACUTE
-039B;Lambda;GREEK CAPITAL LETTER LAMDA
-013D;Lcaron;LATIN CAPITAL LETTER L WITH CARON
-013B;Lcommaaccent;LATIN CAPITAL LETTER L WITH CEDILLA
-013F;Ldot;LATIN CAPITAL LETTER L WITH MIDDLE DOT
-0141;Lslash;LATIN CAPITAL LETTER L WITH STROKE
-F6F9;Lslashsmall;LATIN SMALL CAPITAL LETTER L WITH STROKE
-F76C;Lsmall;LATIN SMALL CAPITAL LETTER L
-004D;M;LATIN CAPITAL LETTER M
-F6D0;Macron;CAPITAL MACRON
-F7AF;Macronsmall;SMALL CAPITAL MACRON
-F76D;Msmall;LATIN SMALL CAPITAL LETTER M
-039C;Mu;GREEK CAPITAL LETTER MU
-004E;N;LATIN CAPITAL LETTER N
-0143;Nacute;LATIN CAPITAL LETTER N WITH ACUTE
-0147;Ncaron;LATIN CAPITAL LETTER N WITH CARON
-0145;Ncommaaccent;LATIN CAPITAL LETTER N WITH CEDILLA
-F76E;Nsmall;LATIN SMALL CAPITAL LETTER N
-00D1;Ntilde;LATIN CAPITAL LETTER N WITH TILDE
-F7F1;Ntildesmall;LATIN SMALL CAPITAL LETTER N WITH TILDE
-039D;Nu;GREEK CAPITAL LETTER NU
-004F;O;LATIN CAPITAL LETTER O
-0152;OE;LATIN CAPITAL LIGATURE OE
-F6FA;OEsmall;LATIN SMALL CAPITAL LIGATURE OE
-00D3;Oacute;LATIN CAPITAL LETTER O WITH ACUTE
-F7F3;Oacutesmall;LATIN SMALL CAPITAL LETTER O WITH ACUTE
-014E;Obreve;LATIN CAPITAL LETTER O WITH BREVE
-00D4;Ocircumflex;LATIN CAPITAL LETTER O WITH CIRCUMFLEX
-F7F4;Ocircumflexsmall;LATIN SMALL CAPITAL LETTER O WITH CIRCUMFLEX
-00D6;Odieresis;LATIN CAPITAL LETTER O WITH DIAERESIS
-F7F6;Odieresissmall;LATIN SMALL CAPITAL LETTER O WITH DIAERESIS
-F6FB;Ogoneksmall;SMALL CAPITAL OGONEK
-00D2;Ograve;LATIN CAPITAL LETTER O WITH GRAVE
-F7F2;Ogravesmall;LATIN SMALL CAPITAL LETTER O WITH GRAVE
-01A0;Ohorn;LATIN CAPITAL LETTER O WITH HORN
-0150;Ohungarumlaut;LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
-014C;Omacron;LATIN CAPITAL LETTER O WITH MACRON
-2126;Omega;OHM SIGN
-03A9;Omega;GREEK CAPITAL LETTER OMEGA;Duplicate
-038F;Omegatonos;GREEK CAPITAL LETTER OMEGA WITH TONOS
-039F;Omicron;GREEK CAPITAL LETTER OMICRON
-038C;Omicrontonos;GREEK CAPITAL LETTER OMICRON WITH TONOS
-00D8;Oslash;LATIN CAPITAL LETTER O WITH STROKE
-01FE;Oslashacute;LATIN CAPITAL LETTER O WITH STROKE AND ACUTE
-F7F8;Oslashsmall;LATIN SMALL CAPITAL LETTER O WITH STROKE
-F76F;Osmall;LATIN SMALL CAPITAL LETTER O
-00D5;Otilde;LATIN CAPITAL LETTER O WITH TILDE
-F7F5;Otildesmall;LATIN SMALL CAPITAL LETTER O WITH TILDE
-0050;P;LATIN CAPITAL LETTER P
-03A6;Phi;GREEK CAPITAL LETTER PHI
-03A0;Pi;GREEK CAPITAL LETTER PI
-03A8;Psi;GREEK CAPITAL LETTER PSI
-F770;Psmall;LATIN SMALL CAPITAL LETTER P
-0051;Q;LATIN CAPITAL LETTER Q
-F771;Qsmall;LATIN SMALL CAPITAL LETTER Q
-0052;R;LATIN CAPITAL LETTER R
-0154;Racute;LATIN CAPITAL LETTER R WITH ACUTE
-0158;Rcaron;LATIN CAPITAL LETTER R WITH CARON
-0156;Rcommaaccent;LATIN CAPITAL LETTER R WITH CEDILLA
-211C;Rfraktur;BLACK-LETTER CAPITAL R
-03A1;Rho;GREEK CAPITAL LETTER RHO
-F6FC;Ringsmall;SMALL CAPITAL RING ABOVE
-F772;Rsmall;LATIN SMALL CAPITAL LETTER R
-0053;S;LATIN CAPITAL LETTER S
-250C;SF010000;BOX DRAWINGS LIGHT DOWN AND RIGHT
-2514;SF020000;BOX DRAWINGS LIGHT UP AND RIGHT
-2510;SF030000;BOX DRAWINGS LIGHT DOWN AND LEFT
-2518;SF040000;BOX DRAWINGS LIGHT UP AND LEFT
-253C;SF050000;BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
-252C;SF060000;BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
-2534;SF070000;BOX DRAWINGS LIGHT UP AND HORIZONTAL
-251C;SF080000;BOX DRAWINGS LIGHT VERTICAL AND RIGHT
-2524;SF090000;BOX DRAWINGS LIGHT VERTICAL AND LEFT
-2500;SF100000;BOX DRAWINGS LIGHT HORIZONTAL
-2502;SF110000;BOX DRAWINGS LIGHT VERTICAL
-2561;SF190000;BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
-2562;SF200000;BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
-2556;SF210000;BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
-2555;SF220000;BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
-2563;SF230000;BOX DRAWINGS DOUBLE VERTICAL AND LEFT
-2551;SF240000;BOX DRAWINGS DOUBLE VERTICAL
-2557;SF250000;BOX DRAWINGS DOUBLE DOWN AND LEFT
-255D;SF260000;BOX DRAWINGS DOUBLE UP AND LEFT
-255C;SF270000;BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
-255B;SF280000;BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
-255E;SF360000;BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
-255F;SF370000;BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
-255A;SF380000;BOX DRAWINGS DOUBLE UP AND RIGHT
-2554;SF390000;BOX DRAWINGS DOUBLE DOWN AND RIGHT
-2569;SF400000;BOX DRAWINGS DOUBLE UP AND HORIZONTAL
-2566;SF410000;BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
-2560;SF420000;BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
-2550;SF430000;BOX DRAWINGS DOUBLE HORIZONTAL
-256C;SF440000;BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
-2567;SF450000;BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
-2568;SF460000;BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
-2564;SF470000;BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
-2565;SF480000;BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
-2559;SF490000;BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
-2558;SF500000;BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
-2552;SF510000;BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
-2553;SF520000;BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
-256B;SF530000;BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
-256A;SF540000;BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
-015A;Sacute;LATIN CAPITAL LETTER S WITH ACUTE
-0160;Scaron;LATIN CAPITAL LETTER S WITH CARON
-F6FD;Scaronsmall;LATIN SMALL CAPITAL LETTER S WITH CARON
-015E;Scedilla;LATIN CAPITAL LETTER S WITH CEDILLA
-F6C1;Scedilla;LATIN CAPITAL LETTER S WITH CEDILLA;Duplicate
-015C;Scircumflex;LATIN CAPITAL LETTER S WITH CIRCUMFLEX
-0218;Scommaaccent;LATIN CAPITAL LETTER S WITH COMMA BELOW
-03A3;Sigma;GREEK CAPITAL LETTER SIGMA
-F773;Ssmall;LATIN SMALL CAPITAL LETTER S
-0054;T;LATIN CAPITAL LETTER T
-03A4;Tau;GREEK CAPITAL LETTER TAU
-0166;Tbar;LATIN CAPITAL LETTER T WITH STROKE
-0164;Tcaron;LATIN CAPITAL LETTER T WITH CARON
-0162;Tcommaaccent;LATIN CAPITAL LETTER T WITH CEDILLA
-021A;Tcommaaccent;LATIN CAPITAL LETTER T WITH COMMA BELOW;Duplicate
-0398;Theta;GREEK CAPITAL LETTER THETA
-00DE;Thorn;LATIN CAPITAL LETTER THORN
-F7FE;Thornsmall;LATIN SMALL CAPITAL LETTER THORN
-F6FE;Tildesmall;SMALL CAPITAL SMALL TILDE
-F774;Tsmall;LATIN SMALL CAPITAL LETTER T
-0055;U;LATIN CAPITAL LETTER U
-00DA;Uacute;LATIN CAPITAL LETTER U WITH ACUTE
-F7FA;Uacutesmall;LATIN SMALL CAPITAL LETTER U WITH ACUTE
-016C;Ubreve;LATIN CAPITAL LETTER U WITH BREVE
-00DB;Ucircumflex;LATIN CAPITAL LETTER U WITH CIRCUMFLEX
-F7FB;Ucircumflexsmall;LATIN SMALL CAPITAL LETTER U WITH CIRCUMFLEX
-00DC;Udieresis;LATIN CAPITAL LETTER U WITH DIAERESIS
-F7FC;Udieresissmall;LATIN SMALL CAPITAL LETTER U WITH DIAERESIS
-00D9;Ugrave;LATIN CAPITAL LETTER U WITH GRAVE
-F7F9;Ugravesmall;LATIN SMALL CAPITAL LETTER U WITH GRAVE
-01AF;Uhorn;LATIN CAPITAL LETTER U WITH HORN
-0170;Uhungarumlaut;LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
-016A;Umacron;LATIN CAPITAL LETTER U WITH MACRON
-0172;Uogonek;LATIN CAPITAL LETTER U WITH OGONEK
-03A5;Upsilon;GREEK CAPITAL LETTER UPSILON
-03D2;Upsilon1;GREEK UPSILON WITH HOOK SYMBOL
-03AB;Upsilondieresis;GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
-038E;Upsilontonos;GREEK CAPITAL LETTER UPSILON WITH TONOS
-016E;Uring;LATIN CAPITAL LETTER U WITH RING ABOVE
-F775;Usmall;LATIN SMALL CAPITAL LETTER U
-0168;Utilde;LATIN CAPITAL LETTER U WITH TILDE
-0056;V;LATIN CAPITAL LETTER V
-F776;Vsmall;LATIN SMALL CAPITAL LETTER V
-0057;W;LATIN CAPITAL LETTER W
-1E82;Wacute;LATIN CAPITAL LETTER W WITH ACUTE
-0174;Wcircumflex;LATIN CAPITAL LETTER W WITH CIRCUMFLEX
-1E84;Wdieresis;LATIN CAPITAL LETTER W WITH DIAERESIS
-1E80;Wgrave;LATIN CAPITAL LETTER W WITH GRAVE
-F777;Wsmall;LATIN SMALL CAPITAL LETTER W
-0058;X;LATIN CAPITAL LETTER X
-039E;Xi;GREEK CAPITAL LETTER XI
-F778;Xsmall;LATIN SMALL CAPITAL LETTER X
-0059;Y;LATIN CAPITAL LETTER Y
-00DD;Yacute;LATIN CAPITAL LETTER Y WITH ACUTE
-F7FD;Yacutesmall;LATIN SMALL CAPITAL LETTER Y WITH ACUTE
-0176;Ycircumflex;LATIN CAPITAL LETTER Y WITH CIRCUMFLEX
-0178;Ydieresis;LATIN CAPITAL LETTER Y WITH DIAERESIS
-F7FF;Ydieresissmall;LATIN SMALL CAPITAL LETTER Y WITH DIAERESIS
-1EF2;Ygrave;LATIN CAPITAL LETTER Y WITH GRAVE
-F779;Ysmall;LATIN SMALL CAPITAL LETTER Y
-005A;Z;LATIN CAPITAL LETTER Z
-0179;Zacute;LATIN CAPITAL LETTER Z WITH ACUTE
-017D;Zcaron;LATIN CAPITAL LETTER Z WITH CARON
-F6FF;Zcaronsmall;LATIN SMALL CAPITAL LETTER Z WITH CARON
-017B;Zdotaccent;LATIN CAPITAL LETTER Z WITH DOT ABOVE
-0396;Zeta;GREEK CAPITAL LETTER ZETA
-F77A;Zsmall;LATIN SMALL CAPITAL LETTER Z
-0061;a;LATIN SMALL LETTER A
-00E1;aacute;LATIN SMALL LETTER A WITH ACUTE
-0103;abreve;LATIN SMALL LETTER A WITH BREVE
-00E2;acircumflex;LATIN SMALL LETTER A WITH CIRCUMFLEX
-00B4;acute;ACUTE ACCENT
-0301;acutecomb;COMBINING ACUTE ACCENT
-00E4;adieresis;LATIN SMALL LETTER A WITH DIAERESIS
-00E6;ae;LATIN SMALL LETTER AE
-01FD;aeacute;LATIN SMALL LETTER AE WITH ACUTE
-2015;afii00208;HORIZONTAL BAR
-0410;afii10017;CYRILLIC CAPITAL LETTER A
-0411;afii10018;CYRILLIC CAPITAL LETTER BE
-0412;afii10019;CYRILLIC CAPITAL LETTER VE
-0413;afii10020;CYRILLIC CAPITAL LETTER GHE
-0414;afii10021;CYRILLIC CAPITAL LETTER DE
-0415;afii10022;CYRILLIC CAPITAL LETTER IE
-0401;afii10023;CYRILLIC CAPITAL LETTER IO
-0416;afii10024;CYRILLIC CAPITAL LETTER ZHE
-0417;afii10025;CYRILLIC CAPITAL LETTER ZE
-0418;afii10026;CYRILLIC CAPITAL LETTER I
-0419;afii10027;CYRILLIC CAPITAL LETTER SHORT I
-041A;afii10028;CYRILLIC CAPITAL LETTER KA
-041B;afii10029;CYRILLIC CAPITAL LETTER EL
-041C;afii10030;CYRILLIC CAPITAL LETTER EM
-041D;afii10031;CYRILLIC CAPITAL LETTER EN
-041E;afii10032;CYRILLIC CAPITAL LETTER O
-041F;afii10033;CYRILLIC CAPITAL LETTER PE
-0420;afii10034;CYRILLIC CAPITAL LETTER ER
-0421;afii10035;CYRILLIC CAPITAL LETTER ES
-0422;afii10036;CYRILLIC CAPITAL LETTER TE
-0423;afii10037;CYRILLIC CAPITAL LETTER U
-0424;afii10038;CYRILLIC CAPITAL LETTER EF
-0425;afii10039;CYRILLIC CAPITAL LETTER HA
-0426;afii10040;CYRILLIC CAPITAL LETTER TSE
-0427;afii10041;CYRILLIC CAPITAL LETTER CHE
-0428;afii10042;CYRILLIC CAPITAL LETTER SHA
-0429;afii10043;CYRILLIC CAPITAL LETTER SHCHA
-042A;afii10044;CYRILLIC CAPITAL LETTER HARD SIGN
-042B;afii10045;CYRILLIC CAPITAL LETTER YERU
-042C;afii10046;CYRILLIC CAPITAL LETTER SOFT SIGN
-042D;afii10047;CYRILLIC CAPITAL LETTER E
-042E;afii10048;CYRILLIC CAPITAL LETTER YU
-042F;afii10049;CYRILLIC CAPITAL LETTER YA
-0490;afii10050;CYRILLIC CAPITAL LETTER GHE WITH UPTURN
-0402;afii10051;CYRILLIC CAPITAL LETTER DJE
-0403;afii10052;CYRILLIC CAPITAL LETTER GJE
-0404;afii10053;CYRILLIC CAPITAL LETTER UKRAINIAN IE
-0405;afii10054;CYRILLIC CAPITAL LETTER DZE
-0406;afii10055;CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
-0407;afii10056;CYRILLIC CAPITAL LETTER YI
-0408;afii10057;CYRILLIC CAPITAL LETTER JE
-0409;afii10058;CYRILLIC CAPITAL LETTER LJE
-040A;afii10059;CYRILLIC CAPITAL LETTER NJE
-040B;afii10060;CYRILLIC CAPITAL LETTER TSHE
-040C;afii10061;CYRILLIC CAPITAL LETTER KJE
-040E;afii10062;CYRILLIC CAPITAL LETTER SHORT U
-F6C4;afii10063;CYRILLIC SMALL LETTER GHE VARIANT
-F6C5;afii10064;CYRILLIC SMALL LETTER BE VARIANT
-0430;afii10065;CYRILLIC SMALL LETTER A
-0431;afii10066;CYRILLIC SMALL LETTER BE
-0432;afii10067;CYRILLIC SMALL LETTER VE
-0433;afii10068;CYRILLIC SMALL LETTER GHE
-0434;afii10069;CYRILLIC SMALL LETTER DE
-0435;afii10070;CYRILLIC SMALL LETTER IE
-0451;afii10071;CYRILLIC SMALL LETTER IO
-0436;afii10072;CYRILLIC SMALL LETTER ZHE
-0437;afii10073;CYRILLIC SMALL LETTER ZE
-0438;afii10074;CYRILLIC SMALL LETTER I
-0439;afii10075;CYRILLIC SMALL LETTER SHORT I
-043A;afii10076;CYRILLIC SMALL LETTER KA
-043B;afii10077;CYRILLIC SMALL LETTER EL
-043C;afii10078;CYRILLIC SMALL LETTER EM
-043D;afii10079;CYRILLIC SMALL LETTER EN
-043E;afii10080;CYRILLIC SMALL LETTER O
-043F;afii10081;CYRILLIC SMALL LETTER PE
-0440;afii10082;CYRILLIC SMALL LETTER ER
-0441;afii10083;CYRILLIC SMALL LETTER ES
-0442;afii10084;CYRILLIC SMALL LETTER TE
-0443;afii10085;CYRILLIC SMALL LETTER U
-0444;afii10086;CYRILLIC SMALL LETTER EF
-0445;afii10087;CYRILLIC SMALL LETTER HA
-0446;afii10088;CYRILLIC SMALL LETTER TSE
-0447;afii10089;CYRILLIC SMALL LETTER CHE
-0448;afii10090;CYRILLIC SMALL LETTER SHA
-0449;afii10091;CYRILLIC SMALL LETTER SHCHA
-044A;afii10092;CYRILLIC SMALL LETTER HARD SIGN
-044B;afii10093;CYRILLIC SMALL LETTER YERU
-044C;afii10094;CYRILLIC SMALL LETTER SOFT SIGN
-044D;afii10095;CYRILLIC SMALL LETTER E
-044E;afii10096;CYRILLIC SMALL LETTER YU
-044F;afii10097;CYRILLIC SMALL LETTER YA
-0491;afii10098;CYRILLIC SMALL LETTER GHE WITH UPTURN
-0452;afii10099;CYRILLIC SMALL LETTER DJE
-0453;afii10100;CYRILLIC SMALL LETTER GJE
-0454;afii10101;CYRILLIC SMALL LETTER UKRAINIAN IE
-0455;afii10102;CYRILLIC SMALL LETTER DZE
-0456;afii10103;CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
-0457;afii10104;CYRILLIC SMALL LETTER YI
-0458;afii10105;CYRILLIC SMALL LETTER JE
-0459;afii10106;CYRILLIC SMALL LETTER LJE
-045A;afii10107;CYRILLIC SMALL LETTER NJE
-045B;afii10108;CYRILLIC SMALL LETTER TSHE
-045C;afii10109;CYRILLIC SMALL LETTER KJE
-045E;afii10110;CYRILLIC SMALL LETTER SHORT U
-040F;afii10145;CYRILLIC CAPITAL LETTER DZHE
-0462;afii10146;CYRILLIC CAPITAL LETTER YAT
-0472;afii10147;CYRILLIC CAPITAL LETTER FITA
-0474;afii10148;CYRILLIC CAPITAL LETTER IZHITSA
-F6C6;afii10192;CYRILLIC SMALL LETTER DE VARIANT
-045F;afii10193;CYRILLIC SMALL LETTER DZHE
-0463;afii10194;CYRILLIC SMALL LETTER YAT
-0473;afii10195;CYRILLIC SMALL LETTER FITA
-0475;afii10196;CYRILLIC SMALL LETTER IZHITSA
-F6C7;afii10831;CYRILLIC SMALL LETTER PE VARIANT
-F6C8;afii10832;CYRILLIC SMALL LETTER TE VARIANT
-04D9;afii10846;CYRILLIC SMALL LETTER SCHWA
-200E;afii299;LEFT-TO-RIGHT MARK
-200F;afii300;RIGHT-TO-LEFT MARK
-200D;afii301;ZERO WIDTH JOINER
-066A;afii57381;ARABIC PERCENT SIGN
-060C;afii57388;ARABIC COMMA
-0660;afii57392;ARABIC-INDIC DIGIT ZERO
-0661;afii57393;ARABIC-INDIC DIGIT ONE
-0662;afii57394;ARABIC-INDIC DIGIT TWO
-0663;afii57395;ARABIC-INDIC DIGIT THREE
-0664;afii57396;ARABIC-INDIC DIGIT FOUR
-0665;afii57397;ARABIC-INDIC DIGIT FIVE
-0666;afii57398;ARABIC-INDIC DIGIT SIX
-0667;afii57399;ARABIC-INDIC DIGIT SEVEN
-0668;afii57400;ARABIC-INDIC DIGIT EIGHT
-0669;afii57401;ARABIC-INDIC DIGIT NINE
-061B;afii57403;ARABIC SEMICOLON
-061F;afii57407;ARABIC QUESTION MARK
-0621;afii57409;ARABIC LETTER HAMZA
-0622;afii57410;ARABIC LETTER ALEF WITH MADDA ABOVE
-0623;afii57411;ARABIC LETTER ALEF WITH HAMZA ABOVE
-0624;afii57412;ARABIC LETTER WAW WITH HAMZA ABOVE
-0625;afii57413;ARABIC LETTER ALEF WITH HAMZA BELOW
-0626;afii57414;ARABIC LETTER YEH WITH HAMZA ABOVE
-0627;afii57415;ARABIC LETTER ALEF
-0628;afii57416;ARABIC LETTER BEH
-0629;afii57417;ARABIC LETTER TEH MARBUTA
-062A;afii57418;ARABIC LETTER TEH
-062B;afii57419;ARABIC LETTER THEH
-062C;afii57420;ARABIC LETTER JEEM
-062D;afii57421;ARABIC LETTER HAH
-062E;afii57422;ARABIC LETTER KHAH
-062F;afii57423;ARABIC LETTER DAL
-0630;afii57424;ARABIC LETTER THAL
-0631;afii57425;ARABIC LETTER REH
-0632;afii57426;ARABIC LETTER ZAIN
-0633;afii57427;ARABIC LETTER SEEN
-0634;afii57428;ARABIC LETTER SHEEN
-0635;afii57429;ARABIC LETTER SAD
-0636;afii57430;ARABIC LETTER DAD
-0637;afii57431;ARABIC LETTER TAH
-0638;afii57432;ARABIC LETTER ZAH
-0639;afii57433;ARABIC LETTER AIN
-063A;afii57434;ARABIC LETTER GHAIN
-0640;afii57440;ARABIC TATWEEL
-0641;afii57441;ARABIC LETTER FEH
-0642;afii57442;ARABIC LETTER QAF
-0643;afii57443;ARABIC LETTER KAF
-0644;afii57444;ARABIC LETTER LAM
-0645;afii57445;ARABIC LETTER MEEM
-0646;afii57446;ARABIC LETTER NOON
-0648;afii57448;ARABIC LETTER WAW
-0649;afii57449;ARABIC LETTER ALEF MAKSURA
-064A;afii57450;ARABIC LETTER YEH
-064B;afii57451;ARABIC FATHATAN
-064C;afii57452;ARABIC DAMMATAN
-064D;afii57453;ARABIC KASRATAN
-064E;afii57454;ARABIC FATHA
-064F;afii57455;ARABIC DAMMA
-0650;afii57456;ARABIC KASRA
-0651;afii57457;ARABIC SHADDA
-0652;afii57458;ARABIC SUKUN
-0647;afii57470;ARABIC LETTER HEH
-06A4;afii57505;ARABIC LETTER VEH
-067E;afii57506;ARABIC LETTER PEH
-0686;afii57507;ARABIC LETTER TCHEH
-0698;afii57508;ARABIC LETTER JEH
-06AF;afii57509;ARABIC LETTER GAF
-0679;afii57511;ARABIC LETTER TTEH
-0688;afii57512;ARABIC LETTER DDAL
-0691;afii57513;ARABIC LETTER RREH
-06BA;afii57514;ARABIC LETTER NOON GHUNNA
-06D2;afii57519;ARABIC LETTER YEH BARREE
-06D5;afii57534;ARABIC LETTER AE
-20AA;afii57636;NEW SHEQEL SIGN
-05BE;afii57645;HEBREW PUNCTUATION MAQAF
-05C3;afii57658;HEBREW PUNCTUATION SOF PASUQ
-05D0;afii57664;HEBREW LETTER ALEF
-05D1;afii57665;HEBREW LETTER BET
-05D2;afii57666;HEBREW LETTER GIMEL
-05D3;afii57667;HEBREW LETTER DALET
-05D4;afii57668;HEBREW LETTER HE
-05D5;afii57669;HEBREW LETTER VAV
-05D6;afii57670;HEBREW LETTER ZAYIN
-05D7;afii57671;HEBREW LETTER HET
-05D8;afii57672;HEBREW LETTER TET
-05D9;afii57673;HEBREW LETTER YOD
-05DA;afii57674;HEBREW LETTER FINAL KAF
-05DB;afii57675;HEBREW LETTER KAF
-05DC;afii57676;HEBREW LETTER LAMED
-05DD;afii57677;HEBREW LETTER FINAL MEM
-05DE;afii57678;HEBREW LETTER MEM
-05DF;afii57679;HEBREW LETTER FINAL NUN
-05E0;afii57680;HEBREW LETTER NUN
-05E1;afii57681;HEBREW LETTER SAMEKH
-05E2;afii57682;HEBREW LETTER AYIN
-05E3;afii57683;HEBREW LETTER FINAL PE
-05E4;afii57684;HEBREW LETTER PE
-05E5;afii57685;HEBREW LETTER FINAL TSADI
-05E6;afii57686;HEBREW LETTER TSADI
-05E7;afii57687;HEBREW LETTER QOF
-05E8;afii57688;HEBREW LETTER RESH
-05E9;afii57689;HEBREW LETTER SHIN
-05EA;afii57690;HEBREW LETTER TAV
-FB2A;afii57694;HEBREW LETTER SHIN WITH SHIN DOT
-FB2B;afii57695;HEBREW LETTER SHIN WITH SIN DOT
-FB4B;afii57700;HEBREW LETTER VAV WITH HOLAM
-FB1F;afii57705;HEBREW LIGATURE YIDDISH YOD YOD PATAH
-05F0;afii57716;HEBREW LIGATURE YIDDISH DOUBLE VAV
-05F1;afii57717;HEBREW LIGATURE YIDDISH VAV YOD
-05F2;afii57718;HEBREW LIGATURE YIDDISH DOUBLE YOD
-FB35;afii57723;HEBREW LETTER VAV WITH DAGESH
-05B4;afii57793;HEBREW POINT HIRIQ
-05B5;afii57794;HEBREW POINT TSERE
-05B6;afii57795;HEBREW POINT SEGOL
-05BB;afii57796;HEBREW POINT QUBUTS
-05B8;afii57797;HEBREW POINT QAMATS
-05B7;afii57798;HEBREW POINT PATAH
-05B0;afii57799;HEBREW POINT SHEVA
-05B2;afii57800;HEBREW POINT HATAF PATAH
-05B1;afii57801;HEBREW POINT HATAF SEGOL
-05B3;afii57802;HEBREW POINT HATAF QAMATS
-05C2;afii57803;HEBREW POINT SIN DOT
-05C1;afii57804;HEBREW POINT SHIN DOT
-05B9;afii57806;HEBREW POINT HOLAM
-05BC;afii57807;HEBREW POINT DAGESH OR MAPIQ
-05BD;afii57839;HEBREW POINT METEG
-05BF;afii57841;HEBREW POINT RAFE
-05C0;afii57842;HEBREW PUNCTUATION PASEQ
-02BC;afii57929;MODIFIER LETTER APOSTROPHE
-2105;afii61248;CARE OF
-2113;afii61289;SCRIPT SMALL L
-2116;afii61352;NUMERO SIGN
-202C;afii61573;POP DIRECTIONAL FORMATTING
-202D;afii61574;LEFT-TO-RIGHT OVERRIDE
-202E;afii61575;RIGHT-TO-LEFT OVERRIDE
-200C;afii61664;ZERO WIDTH NON-JOINER
-066D;afii63167;ARABIC FIVE POINTED STAR
-02BD;afii64937;MODIFIER LETTER REVERSED COMMA
-00E0;agrave;LATIN SMALL LETTER A WITH GRAVE
-2135;aleph;ALEF SYMBOL
-03B1;alpha;GREEK SMALL LETTER ALPHA
-03AC;alphatonos;GREEK SMALL LETTER ALPHA WITH TONOS
-0101;amacron;LATIN SMALL LETTER A WITH MACRON
-0026;ampersand;AMPERSAND
-F726;ampersandsmall;SMALL CAPITAL AMPERSAND
-2220;angle;ANGLE
-2329;angleleft;LEFT-POINTING ANGLE BRACKET
-232A;angleright;RIGHT-POINTING ANGLE BRACKET
-0387;anoteleia;GREEK ANO TELEIA
-0105;aogonek;LATIN SMALL LETTER A WITH OGONEK
-2248;approxequal;ALMOST EQUAL TO
-00E5;aring;LATIN SMALL LETTER A WITH RING ABOVE
-01FB;aringacute;LATIN SMALL LETTER A WITH RING ABOVE AND ACUTE
-2194;arrowboth;LEFT RIGHT ARROW
-21D4;arrowdblboth;LEFT RIGHT DOUBLE ARROW
-21D3;arrowdbldown;DOWNWARDS DOUBLE ARROW
-21D0;arrowdblleft;LEFTWARDS DOUBLE ARROW
-21D2;arrowdblright;RIGHTWARDS DOUBLE ARROW
-21D1;arrowdblup;UPWARDS DOUBLE ARROW
-2193;arrowdown;DOWNWARDS ARROW
-F8E7;arrowhorizex;HORIZONTAL ARROW EXTENDER
-2190;arrowleft;LEFTWARDS ARROW
-2192;arrowright;RIGHTWARDS ARROW
-2191;arrowup;UPWARDS ARROW
-2195;arrowupdn;UP DOWN ARROW
-21A8;arrowupdnbse;UP DOWN ARROW WITH BASE
-F8E6;arrowvertex;VERTICAL ARROW EXTENDER
-005E;asciicircum;CIRCUMFLEX ACCENT
-007E;asciitilde;TILDE
-002A;asterisk;ASTERISK
-2217;asteriskmath;ASTERISK OPERATOR
-F6E9;asuperior;SUPERSCRIPT LATIN SMALL LETTER A
-0040;at;COMMERCIAL AT
-00E3;atilde;LATIN SMALL LETTER A WITH TILDE
-0062;b;LATIN SMALL LETTER B
-005C;backslash;REVERSE SOLIDUS
-007C;bar;VERTICAL LINE
-03B2;beta;GREEK SMALL LETTER BETA
-2588;block;FULL BLOCK
-F8F4;braceex;CURLY BRACKET EXTENDER
-007B;braceleft;LEFT CURLY BRACKET
-F8F3;braceleftbt;LEFT CURLY BRACKET BOTTOM
-F8F2;braceleftmid;LEFT CURLY BRACKET MID
-F8F1;bracelefttp;LEFT CURLY BRACKET TOP
-007D;braceright;RIGHT CURLY BRACKET
-F8FE;bracerightbt;RIGHT CURLY BRACKET BOTTOM
-F8FD;bracerightmid;RIGHT CURLY BRACKET MID
-F8FC;bracerighttp;RIGHT CURLY BRACKET TOP
-005B;bracketleft;LEFT SQUARE BRACKET
-F8F0;bracketleftbt;LEFT SQUARE BRACKET BOTTOM
-F8EF;bracketleftex;LEFT SQUARE BRACKET EXTENDER
-F8EE;bracketlefttp;LEFT SQUARE BRACKET TOP
-005D;bracketright;RIGHT SQUARE BRACKET
-F8FB;bracketrightbt;RIGHT SQUARE BRACKET BOTTOM
-F8FA;bracketrightex;RIGHT SQUARE BRACKET EXTENDER
-F8F9;bracketrighttp;RIGHT SQUARE BRACKET TOP
-02D8;breve;BREVE
-00A6;brokenbar;BROKEN BAR
-F6EA;bsuperior;SUPERSCRIPT LATIN SMALL LETTER B
-2022;bullet;BULLET
-0063;c;LATIN SMALL LETTER C
-0107;cacute;LATIN SMALL LETTER C WITH ACUTE
-02C7;caron;CARON
-21B5;carriagereturn;DOWNWARDS ARROW WITH CORNER LEFTWARDS
-010D;ccaron;LATIN SMALL LETTER C WITH CARON
-00E7;ccedilla;LATIN SMALL LETTER C WITH CEDILLA
-0109;ccircumflex;LATIN SMALL LETTER C WITH CIRCUMFLEX
-010B;cdotaccent;LATIN SMALL LETTER C WITH DOT ABOVE
-00B8;cedilla;CEDILLA
-00A2;cent;CENT SIGN
-F6DF;centinferior;SUBSCRIPT CENT SIGN
-F7A2;centoldstyle;OLDSTYLE CENT SIGN
-F6E0;centsuperior;SUPERSCRIPT CENT SIGN
-03C7;chi;GREEK SMALL LETTER CHI
-25CB;circle;WHITE CIRCLE
-2297;circlemultiply;CIRCLED TIMES
-2295;circleplus;CIRCLED PLUS
-02C6;circumflex;MODIFIER LETTER CIRCUMFLEX ACCENT
-2663;club;BLACK CLUB SUIT
-003A;colon;COLON
-20A1;colonmonetary;COLON SIGN
-002C;comma;COMMA
-F6C3;commaaccent;COMMA BELOW
-F6E1;commainferior;SUBSCRIPT COMMA
-F6E2;commasuperior;SUPERSCRIPT COMMA
-2245;congruent;APPROXIMATELY EQUAL TO
-00A9;copyright;COPYRIGHT SIGN
-F8E9;copyrightsans;COPYRIGHT SIGN SANS SERIF
-F6D9;copyrightserif;COPYRIGHT SIGN SERIF
-00A4;currency;CURRENCY SIGN
-F6D1;cyrBreve;CAPITAL CYRILLIC BREVE
-F6D2;cyrFlex;CAPITAL CYRILLIC CIRCUMFLEX
-F6D4;cyrbreve;CYRILLIC BREVE
-F6D5;cyrflex;CYRILLIC CIRCUMFLEX
-0064;d;LATIN SMALL LETTER D
-2020;dagger;DAGGER
-2021;daggerdbl;DOUBLE DAGGER
-F6D3;dblGrave;CAPITAL DOUBLE GRAVE ACCENT
-F6D6;dblgrave;DOUBLE GRAVE ACCENT
-010F;dcaron;LATIN SMALL LETTER D WITH CARON
-0111;dcroat;LATIN SMALL LETTER D WITH STROKE
-00B0;degree;DEGREE SIGN
-03B4;delta;GREEK SMALL LETTER DELTA
-2666;diamond;BLACK DIAMOND SUIT
-00A8;dieresis;DIAERESIS
-F6D7;dieresisacute;DIAERESIS ACUTE ACCENT
-F6D8;dieresisgrave;DIAERESIS GRAVE ACCENT
-0385;dieresistonos;GREEK DIALYTIKA TONOS
-00F7;divide;DIVISION SIGN
-2593;dkshade;DARK SHADE
-2584;dnblock;LOWER HALF BLOCK
-0024;dollar;DOLLAR SIGN
-F6E3;dollarinferior;SUBSCRIPT DOLLAR SIGN
-F724;dollaroldstyle;OLDSTYLE DOLLAR SIGN
-F6E4;dollarsuperior;SUPERSCRIPT DOLLAR SIGN
-20AB;dong;DONG SIGN
-02D9;dotaccent;DOT ABOVE
-0323;dotbelowcomb;COMBINING DOT BELOW
-0131;dotlessi;LATIN SMALL LETTER DOTLESS I
-F6BE;dotlessj;LATIN SMALL LETTER DOTLESS J
-22C5;dotmath;DOT OPERATOR
-F6EB;dsuperior;SUPERSCRIPT LATIN SMALL LETTER D
-0065;e;LATIN SMALL LETTER E
-00E9;eacute;LATIN SMALL LETTER E WITH ACUTE
-0115;ebreve;LATIN SMALL LETTER E WITH BREVE
-011B;ecaron;LATIN SMALL LETTER E WITH CARON
-00EA;ecircumflex;LATIN SMALL LETTER E WITH CIRCUMFLEX
-00EB;edieresis;LATIN SMALL LETTER E WITH DIAERESIS
-0117;edotaccent;LATIN SMALL LETTER E WITH DOT ABOVE
-00E8;egrave;LATIN SMALL LETTER E WITH GRAVE
-0038;eight;DIGIT EIGHT
-2088;eightinferior;SUBSCRIPT EIGHT
-F738;eightoldstyle;OLDSTYLE DIGIT EIGHT
-2078;eightsuperior;SUPERSCRIPT EIGHT
-2208;element;ELEMENT OF
-2026;ellipsis;HORIZONTAL ELLIPSIS
-0113;emacron;LATIN SMALL LETTER E WITH MACRON
-2014;emdash;EM DASH
-2205;emptyset;EMPTY SET
-2013;endash;EN DASH
-014B;eng;LATIN SMALL LETTER ENG
-0119;eogonek;LATIN SMALL LETTER E WITH OGONEK
-03B5;epsilon;GREEK SMALL LETTER EPSILON
-03AD;epsilontonos;GREEK SMALL LETTER EPSILON WITH TONOS
-003D;equal;EQUALS SIGN
-2261;equivalence;IDENTICAL TO
-212E;estimated;ESTIMATED SYMBOL
-F6EC;esuperior;SUPERSCRIPT LATIN SMALL LETTER E
-03B7;eta;GREEK SMALL LETTER ETA
-03AE;etatonos;GREEK SMALL LETTER ETA WITH TONOS
-00F0;eth;LATIN SMALL LETTER ETH
-0021;exclam;EXCLAMATION MARK
-203C;exclamdbl;DOUBLE EXCLAMATION MARK
-00A1;exclamdown;INVERTED EXCLAMATION MARK
-F7A1;exclamdownsmall;SMALL CAPITAL INVERTED EXCLAMATION MARK
-F721;exclamsmall;SMALL CAPITAL EXCLAMATION MARK
-2203;existential;THERE EXISTS
-0066;f;LATIN SMALL LETTER F
-2640;female;FEMALE SIGN
-FB00;ff;LATIN SMALL LIGATURE FF
-FB03;ffi;LATIN SMALL LIGATURE FFI
-FB04;ffl;LATIN SMALL LIGATURE FFL
-FB01;fi;LATIN SMALL LIGATURE FI
-2012;figuredash;FIGURE DASH
-25A0;filledbox;BLACK SQUARE
-25AC;filledrect;BLACK RECTANGLE
-0035;five;DIGIT FIVE
-215D;fiveeighths;VULGAR FRACTION FIVE EIGHTHS
-2085;fiveinferior;SUBSCRIPT FIVE
-F735;fiveoldstyle;OLDSTYLE DIGIT FIVE
-2075;fivesuperior;SUPERSCRIPT FIVE
-FB02;fl;LATIN SMALL LIGATURE FL
-0192;florin;LATIN SMALL LETTER F WITH HOOK
-0034;four;DIGIT FOUR
-2084;fourinferior;SUBSCRIPT FOUR
-F734;fouroldstyle;OLDSTYLE DIGIT FOUR
-2074;foursuperior;SUPERSCRIPT FOUR
-2044;fraction;FRACTION SLASH
-2215;fraction;DIVISION SLASH;Duplicate
-20A3;franc;FRENCH FRANC SIGN
-0067;g;LATIN SMALL LETTER G
-03B3;gamma;GREEK SMALL LETTER GAMMA
-011F;gbreve;LATIN SMALL LETTER G WITH BREVE
-01E7;gcaron;LATIN SMALL LETTER G WITH CARON
-011D;gcircumflex;LATIN SMALL LETTER G WITH CIRCUMFLEX
-0123;gcommaaccent;LATIN SMALL LETTER G WITH CEDILLA
-0121;gdotaccent;LATIN SMALL LETTER G WITH DOT ABOVE
-00DF;germandbls;LATIN SMALL LETTER SHARP S
-2207;gradient;NABLA
-0060;grave;GRAVE ACCENT
-0300;gravecomb;COMBINING GRAVE ACCENT
-003E;greater;GREATER-THAN SIGN
-2265;greaterequal;GREATER-THAN OR EQUAL TO
-00AB;guillemotleft;LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
-00BB;guillemotright;RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
-2039;guilsinglleft;SINGLE LEFT-POINTING ANGLE QUOTATION MARK
-203A;guilsinglright;SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
-0068;h;LATIN SMALL LETTER H
-0127;hbar;LATIN SMALL LETTER H WITH STROKE
-0125;hcircumflex;LATIN SMALL LETTER H WITH CIRCUMFLEX
-2665;heart;BLACK HEART SUIT
-0309;hookabovecomb;COMBINING HOOK ABOVE
-2302;house;HOUSE
-02DD;hungarumlaut;DOUBLE ACUTE ACCENT
-002D;hyphen;HYPHEN-MINUS
-00AD;hyphen;SOFT HYPHEN;Duplicate
-F6E5;hypheninferior;SUBSCRIPT HYPHEN-MINUS
-F6E6;hyphensuperior;SUPERSCRIPT HYPHEN-MINUS
-0069;i;LATIN SMALL LETTER I
-00ED;iacute;LATIN SMALL LETTER I WITH ACUTE
-012D;ibreve;LATIN SMALL LETTER I WITH BREVE
-00EE;icircumflex;LATIN SMALL LETTER I WITH CIRCUMFLEX
-00EF;idieresis;LATIN SMALL LETTER I WITH DIAERESIS
-00EC;igrave;LATIN SMALL LETTER I WITH GRAVE
-0133;ij;LATIN SMALL LIGATURE IJ
-012B;imacron;LATIN SMALL LETTER I WITH MACRON
-221E;infinity;INFINITY
-222B;integral;INTEGRAL
-2321;integralbt;BOTTOM HALF INTEGRAL
-F8F5;integralex;INTEGRAL EXTENDER
-2320;integraltp;TOP HALF INTEGRAL
-2229;intersection;INTERSECTION
-25D8;invbullet;INVERSE BULLET
-25D9;invcircle;INVERSE WHITE CIRCLE
-263B;invsmileface;BLACK SMILING FACE
-012F;iogonek;LATIN SMALL LETTER I WITH OGONEK
-03B9;iota;GREEK SMALL LETTER IOTA
-03CA;iotadieresis;GREEK SMALL LETTER IOTA WITH DIALYTIKA
-0390;iotadieresistonos;GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
-03AF;iotatonos;GREEK SMALL LETTER IOTA WITH TONOS
-F6ED;isuperior;SUPERSCRIPT LATIN SMALL LETTER I
-0129;itilde;LATIN SMALL LETTER I WITH TILDE
-006A;j;LATIN SMALL LETTER J
-0135;jcircumflex;LATIN SMALL LETTER J WITH CIRCUMFLEX
-006B;k;LATIN SMALL LETTER K
-03BA;kappa;GREEK SMALL LETTER KAPPA
-0137;kcommaaccent;LATIN SMALL LETTER K WITH CEDILLA
-0138;kgreenlandic;LATIN SMALL LETTER KRA
-006C;l;LATIN SMALL LETTER L
-013A;lacute;LATIN SMALL LETTER L WITH ACUTE
-03BB;lambda;GREEK SMALL LETTER LAMDA
-013E;lcaron;LATIN SMALL LETTER L WITH CARON
-013C;lcommaaccent;LATIN SMALL LETTER L WITH CEDILLA
-0140;ldot;LATIN SMALL LETTER L WITH MIDDLE DOT
-003C;less;LESS-THAN SIGN
-2264;lessequal;LESS-THAN OR EQUAL TO
-258C;lfblock;LEFT HALF BLOCK
-20A4;lira;LIRA SIGN
-F6C0;ll;LATIN SMALL LETTER LL
-2227;logicaland;LOGICAL AND
-00AC;logicalnot;NOT SIGN
-2228;logicalor;LOGICAL OR
-017F;longs;LATIN SMALL LETTER LONG S
-25CA;lozenge;LOZENGE
-0142;lslash;LATIN SMALL LETTER L WITH STROKE
-F6EE;lsuperior;SUPERSCRIPT LATIN SMALL LETTER L
-2591;ltshade;LIGHT SHADE
-006D;m;LATIN SMALL LETTER M
-00AF;macron;MACRON
-02C9;macron;MODIFIER LETTER MACRON;Duplicate
-2642;male;MALE SIGN
-2212;minus;MINUS SIGN
-2032;minute;PRIME
-F6EF;msuperior;SUPERSCRIPT LATIN SMALL LETTER M
-00B5;mu;MICRO SIGN
-03BC;mu;GREEK SMALL LETTER MU;Duplicate
-00D7;multiply;MULTIPLICATION SIGN
-266A;musicalnote;EIGHTH NOTE
-266B;musicalnotedbl;BEAMED EIGHTH NOTES
-006E;n;LATIN SMALL LETTER N
-0144;nacute;LATIN SMALL LETTER N WITH ACUTE
-0149;napostrophe;LATIN SMALL LETTER N PRECEDED BY APOSTROPHE
-0148;ncaron;LATIN SMALL LETTER N WITH CARON
-0146;ncommaaccent;LATIN SMALL LETTER N WITH CEDILLA
-0039;nine;DIGIT NINE
-2089;nineinferior;SUBSCRIPT NINE
-F739;nineoldstyle;OLDSTYLE DIGIT NINE
-2079;ninesuperior;SUPERSCRIPT NINE
-2209;notelement;NOT AN ELEMENT OF
-2260;notequal;NOT EQUAL TO
-2284;notsubset;NOT A SUBSET OF
-207F;nsuperior;SUPERSCRIPT LATIN SMALL LETTER N
-00F1;ntilde;LATIN SMALL LETTER N WITH TILDE
-03BD;nu;GREEK SMALL LETTER NU
-0023;numbersign;NUMBER SIGN
-006F;o;LATIN SMALL LETTER O
-00F3;oacute;LATIN SMALL LETTER O WITH ACUTE
-014F;obreve;LATIN SMALL LETTER O WITH BREVE
-00F4;ocircumflex;LATIN SMALL LETTER O WITH CIRCUMFLEX
-00F6;odieresis;LATIN SMALL LETTER O WITH DIAERESIS
-0153;oe;LATIN SMALL LIGATURE OE
-02DB;ogonek;OGONEK
-00F2;ograve;LATIN SMALL LETTER O WITH GRAVE
-01A1;ohorn;LATIN SMALL LETTER O WITH HORN
-0151;ohungarumlaut;LATIN SMALL LETTER O WITH DOUBLE ACUTE
-014D;omacron;LATIN SMALL LETTER O WITH MACRON
-03C9;omega;GREEK SMALL LETTER OMEGA
-03D6;omega1;GREEK PI SYMBOL
-03CE;omegatonos;GREEK SMALL LETTER OMEGA WITH TONOS
-03BF;omicron;GREEK SMALL LETTER OMICRON
-03CC;omicrontonos;GREEK SMALL LETTER OMICRON WITH TONOS
-0031;one;DIGIT ONE
-2024;onedotenleader;ONE DOT LEADER
-215B;oneeighth;VULGAR FRACTION ONE EIGHTH
-F6DC;onefitted;PROPORTIONAL DIGIT ONE
-00BD;onehalf;VULGAR FRACTION ONE HALF
-2081;oneinferior;SUBSCRIPT ONE
-F731;oneoldstyle;OLDSTYLE DIGIT ONE
-00BC;onequarter;VULGAR FRACTION ONE QUARTER
-00B9;onesuperior;SUPERSCRIPT ONE
-2153;onethird;VULGAR FRACTION ONE THIRD
-25E6;openbullet;WHITE BULLET
-00AA;ordfeminine;FEMININE ORDINAL INDICATOR
-00BA;ordmasculine;MASCULINE ORDINAL INDICATOR
-221F;orthogonal;RIGHT ANGLE
-00F8;oslash;LATIN SMALL LETTER O WITH STROKE
-01FF;oslashacute;LATIN SMALL LETTER O WITH STROKE AND ACUTE
-F6F0;osuperior;SUPERSCRIPT LATIN SMALL LETTER O
-00F5;otilde;LATIN SMALL LETTER O WITH TILDE
-0070;p;LATIN SMALL LETTER P
-00B6;paragraph;PILCROW SIGN
-0028;parenleft;LEFT PARENTHESIS
-F8ED;parenleftbt;LEFT PAREN BOTTOM
-F8EC;parenleftex;LEFT PAREN EXTENDER
-208D;parenleftinferior;SUBSCRIPT LEFT PARENTHESIS
-207D;parenleftsuperior;SUPERSCRIPT LEFT PARENTHESIS
-F8EB;parenlefttp;LEFT PAREN TOP
-0029;parenright;RIGHT PARENTHESIS
-F8F8;parenrightbt;RIGHT PAREN BOTTOM
-F8F7;parenrightex;RIGHT PAREN EXTENDER
-208E;parenrightinferior;SUBSCRIPT RIGHT PARENTHESIS
-207E;parenrightsuperior;SUPERSCRIPT RIGHT PARENTHESIS
-F8F6;parenrighttp;RIGHT PAREN TOP
-2202;partialdiff;PARTIAL DIFFERENTIAL
-0025;percent;PERCENT SIGN
-002E;period;FULL STOP
-00B7;periodcentered;MIDDLE DOT
-2219;periodcentered;BULLET OPERATOR;Duplicate
-F6E7;periodinferior;SUBSCRIPT FULL STOP
-F6E8;periodsuperior;SUPERSCRIPT FULL STOP
-22A5;perpendicular;UP TACK
-2030;perthousand;PER MILLE SIGN
-20A7;peseta;PESETA SIGN
-03C6;phi;GREEK SMALL LETTER PHI
-03D5;phi1;GREEK PHI SYMBOL
-03C0;pi;GREEK SMALL LETTER PI
-002B;plus;PLUS SIGN
-00B1;plusminus;PLUS-MINUS SIGN
-211E;prescription;PRESCRIPTION TAKE
-220F;product;N-ARY PRODUCT
-2282;propersubset;SUBSET OF
-2283;propersuperset;SUPERSET OF
-221D;proportional;PROPORTIONAL TO
-03C8;psi;GREEK SMALL LETTER PSI
-0071;q;LATIN SMALL LETTER Q
-003F;question;QUESTION MARK
-00BF;questiondown;INVERTED QUESTION MARK
-F7BF;questiondownsmall;SMALL CAPITAL INVERTED QUESTION MARK
-F73F;questionsmall;SMALL CAPITAL QUESTION MARK
-0022;quotedbl;QUOTATION MARK
-201E;quotedblbase;DOUBLE LOW-9 QUOTATION MARK
-201C;quotedblleft;LEFT DOUBLE QUOTATION MARK
-201D;quotedblright;RIGHT DOUBLE QUOTATION MARK
-2018;quoteleft;LEFT SINGLE QUOTATION MARK
-201B;quotereversed;SINGLE HIGH-REVERSED-9 QUOTATION MARK
-2019;quoteright;RIGHT SINGLE QUOTATION MARK
-201A;quotesinglbase;SINGLE LOW-9 QUOTATION MARK
-0027;quotesingle;APOSTROPHE
-0072;r;LATIN SMALL LETTER R
-0155;racute;LATIN SMALL LETTER R WITH ACUTE
-221A;radical;SQUARE ROOT
-F8E5;radicalex;RADICAL EXTENDER
-0159;rcaron;LATIN SMALL LETTER R WITH CARON
-0157;rcommaaccent;LATIN SMALL LETTER R WITH CEDILLA
-2286;reflexsubset;SUBSET OF OR EQUAL TO
-2287;reflexsuperset;SUPERSET OF OR EQUAL TO
-00AE;registered;REGISTERED SIGN
-F8E8;registersans;REGISTERED SIGN SANS SERIF
-F6DA;registerserif;REGISTERED SIGN SERIF
-2310;revlogicalnot;REVERSED NOT SIGN
-03C1;rho;GREEK SMALL LETTER RHO
-02DA;ring;RING ABOVE
-F6F1;rsuperior;SUPERSCRIPT LATIN SMALL LETTER R
-2590;rtblock;RIGHT HALF BLOCK
-F6DD;rupiah;RUPIAH SIGN
-0073;s;LATIN SMALL LETTER S
-015B;sacute;LATIN SMALL LETTER S WITH ACUTE
-0161;scaron;LATIN SMALL LETTER S WITH CARON
-015F;scedilla;LATIN SMALL LETTER S WITH CEDILLA
-F6C2;scedilla;LATIN SMALL LETTER S WITH CEDILLA;Duplicate
-015D;scircumflex;LATIN SMALL LETTER S WITH CIRCUMFLEX
-0219;scommaaccent;LATIN SMALL LETTER S WITH COMMA BELOW
-2033;second;DOUBLE PRIME
-00A7;section;SECTION SIGN
-003B;semicolon;SEMICOLON
-0037;seven;DIGIT SEVEN
-215E;seveneighths;VULGAR FRACTION SEVEN EIGHTHS
-2087;seveninferior;SUBSCRIPT SEVEN
-F737;sevenoldstyle;OLDSTYLE DIGIT SEVEN
-2077;sevensuperior;SUPERSCRIPT SEVEN
-2592;shade;MEDIUM SHADE
-03C3;sigma;GREEK SMALL LETTER SIGMA
-03C2;sigma1;GREEK SMALL LETTER FINAL SIGMA
-223C;similar;TILDE OPERATOR
-0036;six;DIGIT SIX
-2086;sixinferior;SUBSCRIPT SIX
-F736;sixoldstyle;OLDSTYLE DIGIT SIX
-2076;sixsuperior;SUPERSCRIPT SIX
-002F;slash;SOLIDUS
-263A;smileface;WHITE SMILING FACE
-0020;space;SPACE
-00A0;space;NO-BREAK SPACE;Duplicate
-2660;spade;BLACK SPADE SUIT
-F6F2;ssuperior;SUPERSCRIPT LATIN SMALL LETTER S
-00A3;sterling;POUND SIGN
-220B;suchthat;CONTAINS AS MEMBER
-2211;summation;N-ARY SUMMATION
-263C;sun;WHITE SUN WITH RAYS
-0074;t;LATIN SMALL LETTER T
-03C4;tau;GREEK SMALL LETTER TAU
-0167;tbar;LATIN SMALL LETTER T WITH STROKE
-0165;tcaron;LATIN SMALL LETTER T WITH CARON
-0163;tcommaaccent;LATIN SMALL LETTER T WITH CEDILLA
-021B;tcommaaccent;LATIN SMALL LETTER T WITH COMMA BELOW;Duplicate
-2234;therefore;THEREFORE
-03B8;theta;GREEK SMALL LETTER THETA
-03D1;theta1;GREEK THETA SYMBOL
-00FE;thorn;LATIN SMALL LETTER THORN
-0033;three;DIGIT THREE
-215C;threeeighths;VULGAR FRACTION THREE EIGHTHS
-2083;threeinferior;SUBSCRIPT THREE
-F733;threeoldstyle;OLDSTYLE DIGIT THREE
-00BE;threequarters;VULGAR FRACTION THREE QUARTERS
-F6DE;threequartersemdash;THREE QUARTERS EM DASH
-00B3;threesuperior;SUPERSCRIPT THREE
-02DC;tilde;SMALL TILDE
-0303;tildecomb;COMBINING TILDE
-0384;tonos;GREEK TONOS
-2122;trademark;TRADE MARK SIGN
-F8EA;trademarksans;TRADE MARK SIGN SANS SERIF
-F6DB;trademarkserif;TRADE MARK SIGN SERIF
-25BC;triagdn;BLACK DOWN-POINTING TRIANGLE
-25C4;triaglf;BLACK LEFT-POINTING POINTER
-25BA;triagrt;BLACK RIGHT-POINTING POINTER
-25B2;triagup;BLACK UP-POINTING TRIANGLE
-F6F3;tsuperior;SUPERSCRIPT LATIN SMALL LETTER T
-0032;two;DIGIT TWO
-2025;twodotenleader;TWO DOT LEADER
-2082;twoinferior;SUBSCRIPT TWO
-F732;twooldstyle;OLDSTYLE DIGIT TWO
-00B2;twosuperior;SUPERSCRIPT TWO
-2154;twothirds;VULGAR FRACTION TWO THIRDS
-0075;u;LATIN SMALL LETTER U
-00FA;uacute;LATIN SMALL LETTER U WITH ACUTE
-016D;ubreve;LATIN SMALL LETTER U WITH BREVE
-00FB;ucircumflex;LATIN SMALL LETTER U WITH CIRCUMFLEX
-00FC;udieresis;LATIN SMALL LETTER U WITH DIAERESIS
-00F9;ugrave;LATIN SMALL LETTER U WITH GRAVE
-01B0;uhorn;LATIN SMALL LETTER U WITH HORN
-0171;uhungarumlaut;LATIN SMALL LETTER U WITH DOUBLE ACUTE
-016B;umacron;LATIN SMALL LETTER U WITH MACRON
-005F;underscore;LOW LINE
-2017;underscoredbl;DOUBLE LOW LINE
-222A;union;UNION
-2200;universal;FOR ALL
-0173;uogonek;LATIN SMALL LETTER U WITH OGONEK
-2580;upblock;UPPER HALF BLOCK
-03C5;upsilon;GREEK SMALL LETTER UPSILON
-03CB;upsilondieresis;GREEK SMALL LETTER UPSILON WITH DIALYTIKA
-03B0;upsilondieresistonos;GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
-03CD;upsilontonos;GREEK SMALL LETTER UPSILON WITH TONOS
-016F;uring;LATIN SMALL LETTER U WITH RING ABOVE
-0169;utilde;LATIN SMALL LETTER U WITH TILDE
-0076;v;LATIN SMALL LETTER V
-0077;w;LATIN SMALL LETTER W
-1E83;wacute;LATIN SMALL LETTER W WITH ACUTE
-0175;wcircumflex;LATIN SMALL LETTER W WITH CIRCUMFLEX
-1E85;wdieresis;LATIN SMALL LETTER W WITH DIAERESIS
-2118;weierstrass;SCRIPT CAPITAL P
-1E81;wgrave;LATIN SMALL LETTER W WITH GRAVE
-0078;x;LATIN SMALL LETTER X
-03BE;xi;GREEK SMALL LETTER XI
-0079;y;LATIN SMALL LETTER Y
-00FD;yacute;LATIN SMALL LETTER Y WITH ACUTE
-0177;ycircumflex;LATIN SMALL LETTER Y WITH CIRCUMFLEX
-00FF;ydieresis;LATIN SMALL LETTER Y WITH DIAERESIS
-00A5;yen;YEN SIGN
-1EF3;ygrave;LATIN SMALL LETTER Y WITH GRAVE
-007A;z;LATIN SMALL LETTER Z
-017A;zacute;LATIN SMALL LETTER Z WITH ACUTE
-017E;zcaron;LATIN SMALL LETTER Z WITH CARON
-017C;zdotaccent;LATIN SMALL LETTER Z WITH DOT ABOVE
-0030;zero;DIGIT ZERO
-2080;zeroinferior;SUBSCRIPT ZERO
-F730;zerooldstyle;OLDSTYLE DIGIT ZERO
-2070;zerosuperior;SUPERSCRIPT ZERO
-03B6;zeta;GREEK SMALL LETTER ZETA
-"""
-
-
-t1_bias    = 0
-glyph_list = []
-
-
-def the_adobe_glyph_list():
-  """return the list of glyph names in the adobe list"""
-
-  lines  = string.split( adobe_glyph_list, '\n' )
-  glyphs = []
-
-  for line in lines:
-    if line:
-      fields = string.split( line, ';' )
-#     print fields[0] + ' - ' + fields[1]
-      glyphs.append( fields[1] )
-
-  return glyphs
-
-
-def the_adobe_glyphs():
-  """return the list of unicode values"""
-
-  lines  = string.split( adobe_glyph_list, '\n' )
-  glyphs = []
-  values = []
-
-  for line in lines:
-    if line:
-      fields = string.split( line, ';' )
-#     print fields[0] + ' - ' + fields[1]
-      glyphs.append( fields[1] )
-      values.append( fields[0] )
-
-  return glyphs, values
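-
-# For illustration only: applied to the first record of `adobe_glyph_list'
-# above, the_adobe_glyphs() returns parallel lists starting with
-#
-#   glyphs[0] == "A"    and    values[0] == "0041"
-#
-# i.e. glyph names and their (hexadecimal) Unicode values.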
-
-
-def count_extra_glyphs( alist, filter ):
-  """count the number of extra glyphs"""
-
-  count  = 0
-  extras = []
-
-  for name in alist:
-    try:
-      filtered_index = filter.index( name )
-    except:
-      extras.append( name )
-
-  return extras
-
-
-def dump_mac_indices( file, t1_bias ):
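-  # For each Macintosh glyph name, emit its index in the combined name
-  # table: a name that already exists in `t1_standard_strings' is
-  # referenced as `t1_bias' plus its index there, while Mac-only names
-  # receive consecutive indices starting at 0, since they are stored at
-  # the front of the table.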
-  write = file.write
-
-  write( "  static const unsigned short  mac_standard_names[" + \
-        repr( len( mac_standard_names ) + 1 ) + "] =\n" )
-  write( "  {\n" )
-
-  count = 0
-  for name in mac_standard_names:
-    try:
-      t1_index = t1_standard_strings.index( name )
-      write( "    " + repr( t1_bias + t1_index ) + ",\n" )
-    except:
-      write( "    " + repr( count ) + ",\n" )
-      count = count + 1
-
-  write( "    0\n" )
-  write( "  };\n" )
-  write( "\n" )
-  write( "\n" )
-
-
-def dump_glyph_list( file, glyph_list, adobe_extra ):
-  write = file.write
-
-  name_list = []
-
-  write( "  static const char* const  standard_glyph_names[] =\n" )
-  write( "  {\n" )
-
-  for name in glyph_list:
-    write( '    "' + name + '",\n' )
-    name_list.append( name )
-
-  write( "\n" )
-  write( "#ifdef FT_CONFIG_OPTION_ADOBE_GLYPH_LIST\n" )
-  write( "\n" )
-
-  for name in adobe_extra:
-    write( '    "' + name + '",\n' )
-    name_list.append( name )
-
-  write( "\n" )
-  write( "#endif /* FT_CONFIG_OPTION_ADOBE_GLYPH_LIST */\n" )
-  write( "\n" )
-  write( "    0\n" )
-  write( "  };\n" )
-  write( "\n" )
-  write( "\n" )
-
-  return name_list
-
-
-def dump_unicode_values( file, base_list, adobe_list ):
-  """build the glyph names to unicode values table"""
-
-  write = file.write
-
-  adobe_glyphs, uni_values = the_adobe_glyphs()
-
-  write( "\n" )
-  write( "  static const unsigned short  names_to_unicode[" + \
-          repr( len( base_list ) + len( adobe_list ) + 1 ) + "] =\n" )
-  write( "  {\n" )
-
-  for name in base_list:
-    try:
-      index = adobe_glyphs.index( name )
-      write( "    0x" + uni_values[index] + ",\n" )
-    except:
-      write( "    0,\n" )
-
-  write( "\n" )
-  write( "#ifdef FT_CONFIG_OPTION_ADOBE_GLYPH_LIST\n" )
-  write( "\n" )
-
-  for name in adobe_list:
-    try:
-      index = adobe_glyphs.index( name )
-      write( "    0x" + uni_values[index] + ",\n" )
-    except:
-      write( "    0,\n" )
-
-  write( "\n" )
-  write( "#endif /* FT_CONFIG_OPTION_ADOBE_GLYPH_LIST */\n" )
-  write( "    0\n" )
-  write( "  };\n" )
-  write( "\n" )
-  write( "\n" )
-  write( "\n" )
-
-
-def dump_encoding( file, encoding_name, encoding_list ):
-  """dumps a given encoding"""
-
-  write = file.write
-
-  write( "  static const unsigned short  " + encoding_name + "[" + \
-          repr( len( encoding_list ) + 1 ) + "] =\n" )
-  write( "  {\n" )
-
-  for value in encoding_list:
-    write( "    " + repr( value ) + ",\n" )
-  write( "    0\n" )
-  write( "  };\n" )
-  write( "\n" )
-  write( "\n" )
-
-
-def main():
-  """main program body"""
-
-  if len( sys.argv ) != 2:
-    print __doc__ % sys.argv[0]
-    sys.exit( 1 )
-
-  file  = open( sys.argv[1], "w\n" )
-  write = file.write
-
-  count_sid = len( t1_standard_strings )
-
-  # build mac index table & supplemental glyph names
-  mac_list   = count_extra_glyphs( mac_standard_names, t1_standard_strings )
-  count_mac  = len( mac_list )
-  t1_bias    = count_mac
-  base_list  = mac_list + t1_standard_strings
-
-  # build adobe unicode index table & supplemental glyph names
-  adobe_list  = the_adobe_glyph_list()
-  adobe_list  = count_extra_glyphs( adobe_list, base_list )
-  count_adobe = len( adobe_list )
-
-  write( "/***************************************************************************/\n" )
-  write( "/*                                                                         */\n" )
-
-  write( "/*  %-71s*/\n" % sys.argv[1] )
-
-  write( "/*                                                                         */\n" )
-  write( "/*    PostScript glyph names (specification only).                         */\n" )
-  write( "/*                                                                         */\n" )
-  write( "/*  Copyright 2000 by                                                      */\n" )
-  write( "/*  David Turner, Robert Wilhelm, and Werner Lemberg.                      */\n" )
-  write( "/*                                                                         */\n" )
-  write( "/*  This file is part of the FreeType project, and may only be used,       */\n" )
-  write( "/*  modified, and distributed under the terms of the FreeType project      */\n" )
-  write( "/*  license, LICENSE.TXT.  By continuing to use, modify, or distribute     */\n" )
-  write( "/*  this file you indicate that you have read the license and              */\n" )
-  write( "/*  understand and accept it fully.                                        */\n" )
-  write( "/*                                                                         */\n" )
-  write( "/***************************************************************************/\n" )
-  write( "\n" )
-  write( "\n" )
-  write( "  /* this file has been generated automatically -- do not edit! */\n" )
-  write( "\n" )
-  write( "\n" )
-
-  # dump glyph list
-  name_list = dump_glyph_list( file, base_list, adobe_list )
-
-  # dump t1_standard_list
-  write( "  static const char* const * const  t1_standard_glyphs = " \
-          + "standard_glyph_names + " + repr( t1_bias ) + ";\n" )
-  write( "\n" )
-  write( "\n" )
-
-  write( "#define NUM_STD_GLYPHS " + repr( len( t1_standard_strings ) ) + "\n" )
-  write( "\n" )
-  write( "#ifdef FT_CONFIG_OPTION_ADOBE_GLYPH_LIST\n" )
-  write( "#define NUM_ADOBE_GLYPHS " + \
-          repr( len( base_list ) + len( adobe_list ) - t1_bias ) + "\n" )
-  write( "#else\n" )
-  write( "#define NUM_ADOBE_GLYPHS " + \
-          repr( len( base_list ) - t1_bias )  + "\n" )
-  write( "#endif\n" )
-  write( "\n" )
-  write( "\n" )
-
-  # dump mac indices table
-  dump_mac_indices( file, t1_bias )
-
-  # discard mac names from base list
-  base_list = base_list[t1_bias:]
-
-  # dump unicode values table
-  dump_unicode_values( file, base_list, adobe_list )
-
-  dump_encoding( file, "t1_standard_encoding", t1_standard_encoding )
-  dump_encoding( file, "t1_expert_encoding", t1_expert_encoding )
-
-  write( "/* END */\n" )
-
-
-# Now run the main routine
-#
-main()
-
-
-# END
--- a/include/freetype/config/ftconfig.h
+++ b/include/freetype/config/ftconfig.h
@@ -127,6 +127,20 @@
 #error "no 32bit type found -- please check your configuration files"
 #endif
 
+/* now look up an integer type that is at least 32 bits wide */
+#if FT_SIZEOF_INT >= 4
+
+  typedef int           FT_Fast;
+  typedef unsigned int  FT_UFast;
+
+#elif FT_SIZEOF_LONG >= 4
+
+  typedef long          FT_Fast;
+  typedef unsigned long FT_UFast;
+
+#endif
+
+
 
   /* determine whether we have a 64-bit int type for platforms without */
   /* Autoconf                                                          */
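
The hunk above picks an integer type that is at least 32 bits wide for the
new FT_Fast/FT_UFast typedefs, based on the same FT_SIZEOF_INT and
FT_SIZEOF_LONG macros used just above it.  As a minimal sketch (illustrative
only, not part of the patch), on a platform where "int" is 32 bits the first
branch is kept and the new types simply reduce to:

    /* what the preprocessor selection above boils down to      */
    /* when FT_SIZEOF_INT >= 4 (e.g. typical 32/64-bit systems) */
    typedef int           FT_Fast;
    typedef unsigned int  FT_UFast;
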
--- a/include/freetype/config/ftheader.h
+++ b/include/freetype/config/ftheader.h
@@ -436,7 +436,7 @@
 
   /* */
 
- 
+#define FT_TRIGONOMETRY_H          <freetype/fttrigon.h>
 #define FT_SYNTHESIS_H             <freetype/ftsynth.h>
 
 #define FT_CACHE_MANAGER_H         <freetype/cache/ftcmanag.h>
--- /dev/null
+++ b/include/freetype/fttrigon.h
@@ -1,0 +1,213 @@
+#ifndef __FT_TRIGONOMETRY_H__
+#define __FT_TRIGONOMETRY_H__
+
+FT_BEGIN_HEADER
+
+ /***************************************************************************
+  *
+  * @section: computations
+  *
+  */
+
+ /***************************************************************************
+  *
+  * @type: FT_Angle
+  *
+  * @description:
+  *   this type is used to model angle values in FreeType. Note that
+  *   the angle is a 16.16 fixed-point value expressed in _degrees_.
+  */
+  typedef FT_Fixed   FT_Angle;
+
+ /***************************************************************************
+  *
+  * @macro: FT_ANGLE_PI
+  *
+  * @description:
+  *   the angle pi expressed in @FT_Angle units
+  */
+#define  FT_ANGLE_PI   (180L << 16)
+
+ /***************************************************************************
+  *
+  * @macro: FT_ANGLE_2PI
+  *
+  * @description:
+  *   the angle 2pi expressed in @FT_Angle units
+  */
+#define  FT_ANGLE_2PI  (FT_ANGLE_PI*2)
+
+ /***************************************************************************
+  *
+  * @macro: FT_ANGLE_PI2
+  *
+  * @description:
+  *   the angle pi/2 expressed in @FT_Angle units
+  */
+#define  FT_ANGLE_PI2  (FT_ANGLE_PI/2)
+
+ /***************************************************************************
+  *
+  * @macro: FT_ANGLE_PI4
+  *
+  * @description:
+  *   the angle pi/4 expressed in @FT_Angle units
+  */
+#define  FT_ANGLE_PI4  (FT_ANGLE_PI/4)
+
+
+ /***************************************************************************
+  *
+  * @function: FT_Sin
+  *
+  * @description:
+  *   return the sine of a given angle in fixed-point format
+  *
+  * @input:
+  *    angle :: input angle
+  *
+  * @return:
+  *    sine value
+  *
+  * @note:
+  *   if you need both the sine and cosine of a given angle, it is
+  *   more efficient to call @FT_Vector_Unit
+  */
+  FT_EXPORT(FT_Fixed)    FT_Sin( FT_Angle  angle );
+
+ /***************************************************************************
+  *
+  * @function: FT_Cos
+  *
+  * @description:
+  *   return the cosine of a given angle in fixed-point format
+  *
+  * @input:
+  *    angle :: input angle
+  *
+  * @return:
+  *    cosine value
+  *
+  * @note:
+  *   if you need both the sine and cosine of a given angle, it is
+  *   more efficient to call @FT_Vector_Unit
+  */
+  FT_EXPORT(FT_Fixed)    FT_Cos( FT_Angle  angle );
+
+ /***************************************************************************
+  *
+  * @function: FT_Tan
+  *
+  * @description:
+  *   return the tangent of a given angle in fixed-point format
+  *
+  * @input:
+  *   angle :: input angle
+  *
+  * @return:
+  *   tangent value
+  */
+  FT_EXPORT(FT_Fixed)    FT_Tan( FT_Angle  angle );
+
+
+ /***************************************************************************
+  *
+  * @function: FT_Atan2
+  *
+  * @description:
+  *   return the arc-tangent corresponding to a given vector (x,y) in
+  *   the 2d plane
+  *
+  * @input:
+  *   x :: horizontal vector coordinate
+  *   y :: vertical vector coordinate
+  *
+  * @return:
+  *   arc-tangent value (i.e. angle)
+  */
+  FT_EXPORT(FT_Angle)    FT_Atan2( FT_Fixed  x, FT_Fixed  y );
+
+
+ /***************************************************************************
+  *
+  * @function: FT_Vector_Unit
+  *
+  * @description:
+  *   return the unit vector corresponding to a given angle. After the
+  *   call, the value of "vec.x" will be "cos(angle)", and the value of
+  *   "vec.y" will be "sin(angle)"
+  *
+  *   this function is useful to retrieve both the sine and cosine of
+  *   a given angle quickly
+  *
+  * @input:
+  *   vec   :: address of target vector
+  *   angle :: the input angle
+  */
+  FT_EXPORT(void)        FT_Vector_Unit( FT_Vector*  vec,
+                                         FT_Angle    angle );
+
+ /***************************************************************************
+  *
+  * @function: FT_Vector_Rotate
+  *
+  * @description:
+  *   rotate a given vector by a given angle
+  *
+  * @input:
+  *   vec   :: address of target vector
+  *   angle :: the input angle
+  */
+  FT_EXPORT(void)        FT_Vector_Rotate( FT_Vector*    vec,
+                                           FT_Angle      angle );
+
+ /***************************************************************************
+  *
+  * @function: FT_Vector_Length
+  *
+  * @description:
+  *   returns the length of a given vector
+  *
+  * @input:
+  *   vec   :: address of target vector
+  *
+  * @return:
+  *   vector length, expressed in the same units as the original
+  *   vector coordinates
+  */
+  FT_EXPORT(FT_Fixed)    FT_Vector_Length( FT_Vector*  vec );
+
+ /***************************************************************************
+  *
+  * @function: FT_Vector_Normalize
+  *
+  * @description:
+  *   normalize a given vector (i.e. compute the equivalent unit vector)
+  *
+  * @input:
+  *   vec   :: address of target vector
+  */
+  FT_EXPORT(void)        FT_Vector_Normalize( FT_Vector*  vec );
+
+ /***************************************************************************
+  *
+  * @function: FT_Vector_Polarize
+  *
+  * @description:
+  *   compute both the length and angle of a given vector
+  *
+  * @input:
+  *   vec    :: address of source vector
+  *
+  * @output:
+  *   length :: vector length
+  *   angle  :: vector angle
+  */
+  FT_EXPORT(void)        FT_Vector_Polarize( FT_Vector*  vec,
+                                             FT_Fixed   *length,
+                                             FT_Angle   *angle );
+  /* */
+  
+FT_END_HEADER
+
+#endif /* __FT_TRIGONOMETRY_H__ */
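
To give a concrete idea of how the new header is meant to be used, here is a
small usage sketch (illustrative only, not part of the patch).  It assumes a
library built with the files added by this patch, and relies on FT_Angle
being a 16.16 fixed-point angle in degrees, as documented above:

    #include <ft2build.h>
    #include FT_FREETYPE_H        /* for FT_Vector and FT_Fixed */
    #include FT_TRIGONOMETRY_H    /* the new public header      */

    #include <stdio.h>

    int
    main( void )
    {
      FT_Angle   angle = 30L << 16;          /* 30 degrees in 16.16 format */
      FT_Fixed   s     = FT_Sin( angle );    /* ~0.5, i.e. about 0x8000    */
      FT_Vector  v;

      v.x = 1L << 16;                        /* the vector (1.0, 0.0)      */
      v.y = 0;
      FT_Vector_Rotate( &v, angle );         /* rotate it by 30 degrees    */

      printf( "sin(30) = %f\n", s / 65536.0 );
      printf( "rotated = (%f, %f)\n", v.x / 65536.0, v.y / 65536.0 );
      return 0;
    }

When both the sine and the cosine of the same angle are needed,
FT_Vector_Unit( &v, angle ) fills in cos(angle) and sin(angle) with a single
pseudo-rotation, which is what the @note entries above recommend.
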
--- a/include/freetype/t1tables.h
+++ b/include/freetype/t1tables.h
@@ -113,8 +113,8 @@
     FT_Bool    force_bold;
     FT_Bool    round_stem_up;
 
-    FT_Short   snap_widths [13];  /* reserve one place for the std */
-    FT_Short   snap_heights[13];  /* reserve one place for the std */
+    FT_Short   snap_widths [13];  /* including std width  */
+    FT_Short   snap_heights[13];  /* including std height */
 
     FT_Long    language_group;
     FT_Long    password;
@@ -136,12 +136,12 @@
   /*                                                                       */
   typedef enum
   {
-    /* required fields in a FontInfo blend dictionary */
+    /*# required fields in a FontInfo blend dictionary */
     t1_blend_underline_position = 0,
     t1_blend_underline_thickness,
     t1_blend_italic_angle,
 
-    /* required fields in a Private blend dictionary */
+    /*# required fields in a Private blend dictionary */
     t1_blend_blue_values,
     t1_blend_other_blues,
     t1_blend_standard_width,
@@ -154,7 +154,7 @@
     t1_blend_family_other_blues,
     t1_blend_force_bold,
 
-    /* never remove */
+    /*# never remove */
     t1_blend_max
 
   } T1_Blend_Flags;
--- a/src/autohint/ahglyph.c
+++ b/src/autohint/ahglyph.c
@@ -359,7 +359,7 @@
       outline->horz_major_dir = ah_dir_right;
     }
 
-#else
+#else  /* !1 */
 
     /* Compute the vertical and horizontal major directions; this is     */
     /* currently done by inspecting the `ft_outline_reverse_fill' flag.  */
@@ -374,7 +374,7 @@
       outline->horz_major_dir = ah_dir_right;
     }
 
-#endif /* 1 */
+#endif /* !1 */
 
     outline->x_scale = face->size->metrics.x_scale;
     outline->y_scale = face->size->metrics.y_scale;
--- a/src/base/Jamfile
+++ b/src/base/Jamfile
@@ -10,7 +10,7 @@
 
   if $(FT2_MULTI)
   {
-    _sources = ftcalc ftextend ftlist ftobjs ftstream ftoutln ftnames ;
+    _sources = ftcalc ftextend ftlist ftobjs ftstream ftoutln ftnames fttrigon ;
   }
   else
   {
--- a/src/base/ftbase.c
+++ b/src/base/ftbase.c
@@ -21,6 +21,7 @@
 #define  FT_MAKE_OPTION_SINGLE_OBJECT
 
 #include "ftcalc.c"
+#include "fttrigon.c"
 #include "ftobjs.c"
 #include "ftstream.c"
 #include "ftlist.c"
--- a/src/base/ftbbox.c
+++ b/src/base/ftbbox.c
@@ -278,13 +278,13 @@
 #else
 
   static void
-  test_cubic_zero( FT_Pos    y1,
-                   FT_Pos    y2,
-                   FT_Pos    y3,
-                   FT_Pos    y4,
-                   FT_Fixed  u,
-                   FT_Pos*   min,
-                   FT_Pos*   max )
+  test_cubic_extrema( FT_Pos    y1,
+                      FT_Pos    y2,
+                      FT_Pos    y3,
+                      FT_Pos    y4,
+                      FT_Fixed  u,
+                      FT_Pos*   min,
+                      FT_Pos*   max )
   {
  /* FT_Pos    a = y4 - 3*y3 + 3*y2 - y1; */
     FT_Pos    b = y3 - 2*y2 + y1;
@@ -373,6 +373,26 @@
         int       shift = 0;
 
 
+        /* technical explanation of what's happening here              */
+        /*                                                             */
+        /*   the following computation is based on the fact that for   */
+        /*   any value "y", if "n" is the position of the most         */
+        /*   significant bit of "abs(y)" (starting from 0 for the      */
+        /*   least significant bit), then y is in the range            */
+        /*                                                             */
+        /*                  "-2^n..2^n-1"                              */
+        /*                                                             */
+        /*   we want to shift "a", "b" and "c" concurrently in order   */
+        /*   to ensure that they all fit in 8.16 values, which maps    */
+        /*   to the integer range "-2^23..2^23-1"                      */
+        /*                                                             */
+        /*   necessarily, we need to shift "a", "b" and "c" so that    */
+        /*   the most significant bit of their absolute values is at   */
+        /*   _most_ at position 23                                     */
+        /*                                                             */
+        /*   we begin by computing "t1" as the bitwise "or" of the     */
+        /*   absolute values of "a", "b", "c"                          */
+        /*                                                             */
         t1  = (FT_ULong)((a >= 0) ? a : -a );
         t2  = (FT_ULong)((b >= 0) ? b : -b );
         t1 |= t2;
@@ -379,28 +399,55 @@
         t2  = (FT_ULong)((c >= 0) ? c : -c );
         t1 |= t2;
 
+        /*   now, the most significant bit of "t1" is sure to be the   */
+        /*   msb of one of "a", "b", "c", depending on which one is    */
+        /*   expressed in the greatest integer range..                 */
+        /*                                                             */
+        /*   we will now compute the "shift", by shifting "t1" as many */
+        /*   times as necessary to move its msb to position 23.        */
+        /*                                                             */
+        /*   this corresponds to a value of t1 that is in the range    */
+        /*   0x40_0000..0x7F_FFFF                                      */
+        /*                                                             */
+        /*   finally, we shift "a", "b" and "c" by the same amount.    */
+        /*   this ensures that all values are now in the range         */
+        /*   -2^23..2^23, i.e. that they're now expressed as 8.16      */
+        /*   fixed-point numbers..                                     */
+        /*                                                             */
+        /*   this also means that we're using 24 bits of precision     */
+        /*   to compute the zeros, independently of the range of       */
+        /*   the original polynomial coefficients.                    */
+        /*                                                             */
+        /*   this should ensure reasonably accurate values for the     */
+        /*   zeros. Note that the latter are only expressed with       */
+        /*   16 bits when computing the extrema (the zeros need to     */
+        /*   be in 0..1 exclusive to be considered part of the arc)    */
+        /*                                                             */
+
         if ( t1 == 0 )  /* all coefficients are 0! */
           return;
 
-        if ( t1 > 0x7FFFFFL )
+        if ( t1 > 0x7FFFFFUL )
         {
           do
           {
             shift++;
             t1 >>= 1;
-          } while ( t1 > 0x7FFFFFL );
+          } while ( t1 > 0x7FFFFFUL );
 
+          /* losing some bits of precision, but we'll use 24 of them */
+          /* for the computation anyway..                            */
           a >>= shift;
           b >>= shift;
           c >>= shift;
         }
-        else if ( t1 < 0x400000L )
+        else if ( t1 < 0x400000UL )
         {
           do
           {
             shift++;
             t1 <<= 1;
-          } while ( t1 < 0x400000L );
+          } while ( t1 < 0x400000UL );
 
           a <<= shift;
           b <<= shift;
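
The comment block added above describes the rescaling of the quadratic
coefficients: the absolute values of "a", "b" and "c" are OR-ed together and
the result is shifted until it lands in the 0x400000..0x7FFFFF window, so
that all three values fit into roughly 24 significant bits before the zeros
are computed.  The following stand-alone sketch of that normalization step
is illustrative only (hypothetical sample values; the real code is the hunk
above):

    #include <stdio.h>

    /* shift a, b and c together until the msb of the largest    */
    /* absolute value falls into the 0x400000..0x7FFFFF window   */
    static void
    normalize3( long*  a, long*  b, long*  c )
    {
      unsigned long  t1, t2;
      int            shift = 0;

      t1  = (unsigned long)( *a >= 0 ? *a : -*a );
      t2  = (unsigned long)( *b >= 0 ? *b : -*b );
      t1 |= t2;
      t2  = (unsigned long)( *c >= 0 ? *c : -*c );
      t1 |= t2;

      if ( t1 == 0 )                     /* all coefficients are zero */
        return;

      if ( t1 > 0x7FFFFFUL )
      {
        do { shift++; t1 >>= 1; } while ( t1 > 0x7FFFFFUL );
        *a >>= shift;  *b >>= shift;  *c >>= shift;
      }
      else if ( t1 < 0x400000UL )
      {
        do { shift++; t1 <<= 1; } while ( t1 < 0x400000UL );
        *a <<= shift;  *b <<= shift;  *c <<= shift;
      }
    }

    int
    main( void )
    {
      long  a = 90000000L, b = -1200000L, c = 35000L;   /* made-up values */

      normalize3( &a, &b, &c );
      printf( "%ld %ld %ld\n", a, b, c );  /* all three now fit in ~24 bits */
      return 0;
    }

Shifting all three coefficients by the same amount leaves the roots of the
quadratic unchanged; only their precision is affected, which is exactly what
the comments above point out.
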
@@ -414,7 +461,7 @@
         if ( b != 0 )
         {
           t = - FT_DivFix( c, b ) / 2;
-          test_cubic_zero( y1, y2, y3, y4, t, min, max );
+          test_cubic_extrema( y1, y2, y3, y4, t, min, max );
         }
       }
       else
@@ -428,7 +475,7 @@
         {
           /* there is a single split point at -b/a */
           t = - FT_DivFix( b, a );
-          test_cubic_zero( y1, y2, y3, y4, t, min, max );
+          test_cubic_extrema( y1, y2, y3, y4, t, min, max );
         }
         else
         {
@@ -435,10 +482,10 @@
           /* there are two solutions; we need to filter them though */
           d = FT_SqrtFixed( (FT_Int32)d );
           t = - FT_DivFix( b - d, a );
-          test_cubic_zero( y1, y2, y3, y4, t, min, max );
+          test_cubic_extrema( y1, y2, y3, y4, t, min, max );
 
           t = - FT_DivFix( b + d, a );
-          test_cubic_zero( y1, y2, y3, y4, t, min, max );
+          test_cubic_extrema( y1, y2, y3, y4, t, min, max );
         }
       }
     }
--- a/src/base/ftobjs.c
+++ b/src/base/ftobjs.c
@@ -1240,14 +1240,15 @@
             goto Success;
 
           if ( error != FT_Err_Unknown_File_Format )
-            goto Fail;
+            goto Fail2;
         }
       }
 
-      ft_done_stream( &stream, external_stream );
-
       /* no driver is able to handle this format */
       error = FT_Err_Unknown_File_Format;
+
+  Fail2:
+      ft_done_stream( &stream, external_stream );
       goto Fail;
     }
 
--- /dev/null
+++ b/src/base/fttrigon.c
@@ -1,0 +1,404 @@
+#include <ft2build.h>
+#include FT_TRIGONOMETRY_H
+
+/* the following is 0.2715717684432231 * 2^30 */
+#define  FT_TRIG_COSCALE   0x11616E8E	/* 291597966 = 0.2715717684432241 * 2^30, valid for j>13 */
+
+  /* this table was generated for FT_ANGLE_PI = 180L << 16, i.e. degrees */
+#define  FT_TRIG_MAX_ITERS  23
+
+  static const FT_Fixed
+  ft_trig_arctan_table[ 24 ] =
+  {
+    4157273, 2949120, 1740967, 919879, 466945, 234379, 117304, 58666,
+    29335, 14668, 7334, 3667, 1833, 917, 458, 229, 115, 57, 29, 14, 7,
+    4, 2, 1
+  };
+
+
+/* the Cordic shrink factor, multiplied by 2^32 */
+#define  FT_TRIG_SCALE    1166391785  /* 0x4585B9E9U */
+
+#ifdef FT_CONFIG_HAS_INT64
+
+ /* multiply a given value by the CORDIC shrink factor */
+  static FT_Fixed
+  ft_trig_downscale( FT_Fixed  val )
+  {
+    FT_Fixed  s;
+    FT_Int64  v;
+
+    s   = val;
+    val = (val >= 0) ? val : -val;
+    
+    v   = (val * (FT_Int64)FT_TRIG_SCALE) + 0x100000000L;
+    val = (FT_Fixed)(v >> 32);
+
+    return ( s >= 0 ) ? val : -val;
+  }
+
+#else /* !FT_CONFIG_HAS_INT64 */
+
+ /* multiply a given value by the CORDIC shrink factor */
+  static FT_Fixed
+  ft_trig_downscale( FT_Fixed  val )
+  {
+    FT_Fixed   s;
+    FT_UInt32  v1, v2, k1, k2, hi, lo1, lo2, lo3;
+    
+    s   = val;
+    val = ( val >= 0 ) ? val : -val;
+
+    v1 = (FT_UInt32)val >> 16;
+    v2 = (FT_UInt32)val & 0xFFFF;
+    
+    k1 = FT_TRIG_SCALE >> 16;      /* constant */
+    k2 = FT_TRIG_SCALE & 0xFFFF;   /* constant */
+
+    hi   = k1*v1;
+    lo1  = k1*v2 + k2*v1;   /* can't overflow */
+    
+    lo2  = k2*v2 >> 16;
+    lo3  = ( lo1 >= lo2 ) ? lo1 : lo2;
+    lo1 += lo2;
+
+    hi  += lo1 >> 16;
+    if (lo1 < lo3)
+      hi += 0x10000U;
+
+    val  = (FT_Fixed)hi;
+
+    return ( s >= 0 ) ? val : -val;
+  }
+
+#endif /* !FT_CONFIG_HAS_INT64 */
+
+
+  static FT_Int
+  ft_trig_prenorm( FT_Vector*  vec )
+  {
+    FT_Fixed  x, y, z;
+    FT_Int    shift;
+
+    x = vec->x;
+    y = vec->y;
+    
+    z     = (( x >= 0 ) ? x : - x) | ((y >= 0) ? y : -y);
+    shift = 0;
+    
+    if ( z < (1L << 27) )
+    {
+      do
+      {
+        shift++;
+        z <<= 1;
+      }
+      while ( z < (1L << 27) );
+      
+      vec->x = (x << shift);
+      vec->y = (y << shift);
+    }
+    else if ( z > (1L << 28 ) )
+    {
+      do
+      {
+        shift++;
+        z >>= 1;
+      }
+      while ( z > (1L << 28) );
+
+      vec->x = (x >> shift);
+      vec->y = (y >> shift);
+      shift  = -shift;
+    }
+    return shift;
+  }
+    
+  
+
+
+  static void
+  ft_trig_pseudo_rotate( FT_Vector*  vec, FT_Angle  theta )
+  {
+    FT_Int          i;
+    FT_Fixed        x, y, xtemp;
+    const FT_Fixed *arctanptr;
+
+    x = vec->x;
+    y = vec->y;
+
+    /* Get angle between -90 and 90 degrees */
+    while (theta <= -FT_ANGLE_PI2)
+    {
+      x = -x;
+      y = -y;
+      theta += FT_ANGLE_PI;
+    }
+    while (theta > FT_ANGLE_PI2)
+    {
+      x = -x;
+      y = -y;                   
+      theta -= FT_ANGLE_PI;
+    }
+
+    /* Initial pseudorotation, with left shift */
+    arctanptr = ft_trig_arctan_table;
+    if (theta < 0)
+    {
+      xtemp  = x + (y << 1);
+      y      = y - (x << 1);
+      x      = xtemp;
+      theta += *arctanptr++;
+    }
+    else
+    {
+      xtemp  = x - (y << 1);
+      y      = y + (x << 1);
+      x      = xtemp;
+      theta -= *arctanptr++;
+    }
+
+    /* Subsequent pseudorotations, with right shifts */
+    i = 0;
+    do
+    {
+      if (theta < 0)
+      {
+        xtemp = x + (y >> i);
+        y     = y - (x >> i);
+        x     = xtemp;
+        theta += *arctanptr++;
+      }
+      else
+      {
+        xtemp  = x - (y >> i);
+        y      = y + (x >> i);
+        x      = xtemp;
+        theta -= *arctanptr++;
+      }
+    }
+    while ( ++i < FT_TRIG_MAX_ITERS );
+
+    vec->x = x;
+    vec->y = y;
+  }
+
+
+  static void
+  ft_trig_pseudo_polarize( FT_Vector*  vec )
+  {
+    FT_Fixed theta;
+    FT_Fixed yi, i;
+    FT_Fixed x, y;
+    const FT_Fixed *arctanptr;
+
+    x = vec->x;
+    y = vec->y;
+
+    /* Get the vector into the right half plane */
+    theta = 0;
+    if (x < 0)
+    {
+      x = -x;
+      y = -y;
+      theta = 2 * FT_ANGLE_PI2;
+    }
+
+    if (y > 0)
+      theta = - theta;
+
+    arctanptr = ft_trig_arctan_table;
+    if (y < 0)
+    {
+      /* Rotate positive */
+      yi = y + (x << 1);
+      x  = x - (y << 1);
+      y  = yi;
+      theta -= *arctanptr++;  /* Subtract angle */
+    }
+    else
+    {
+      /* Rotate negative */
+      yi = y - (x << 1);
+      x  = x + (y << 1);
+      y  = yi;
+      theta += *arctanptr++;  /* Add angle */
+    }
+
+    i = 0;
+    do
+    {
+      if (y < 0)
+      {
+        /* Rotate positive */
+        yi = y + (x >> i);
+        x  = x - (y >> i);
+        y  = yi;
+        theta -= *arctanptr++;
+      }
+      else
+      {
+        /* Rotate negative */
+        yi = y - (x >> i);
+        x  = x + (y >> i);
+        y  = yi;
+        theta += *arctanptr++;
+      }
+    }
+    while (++i < FT_TRIG_MAX_ITERS);
+
+    /* round theta */
+    if ( theta >= 0 )
+      theta = ( theta + 16 ) & -32;
+    else
+      theta = - (( -theta + 16 ) & -32);
+
+    vec->x = x;
+    vec->y = theta;
+  }
+
+
+  FT_EXPORT_DEF(FT_Fixed)
+  FT_Cos( FT_Angle  angle )
+  {
+    FT_Vector  v;
+    
+    v.x = FT_TRIG_COSCALE >> 2;
+    v.y = 0;
+    ft_trig_pseudo_rotate( &v, angle );
+    
+    return v.x >> 12;
+  }
+
+
+  FT_EXPORT_DEF(FT_Fixed)
+  FT_Sin( FT_Angle  angle )
+  {
+    return FT_Cos( FT_ANGLE_PI2-angle );
+  }
+
+
+  FT_EXPORT_DEF(FT_Fixed)
+  FT_Tan( FT_Angle  angle )
+  {
+    FT_Vector  v;
+    
+    v.x = FT_TRIG_COSCALE >> 2;
+    v.y = 0;
+    ft_trig_pseudo_rotate( &v, angle );
+    
+    return FT_DivFix( v.y, v.x );
+  }
+
+
+
+  FT_EXPORT_DEF(FT_Angle)
+  FT_Atan2( FT_Fixed  dx,
+            FT_Fixed  dy )
+  {
+    FT_Vector  v;
+    
+    if ( dx == 0 && dy == 0 )
+      return 0;
+
+    v.x = dx;
+    v.y = dy;      
+    ft_trig_prenorm( &v );
+    ft_trig_pseudo_polarize( &v );
+    return v.y;
+  }
+
+
+  FT_EXPORT_DEF(void)
+  FT_Vector_Unit( FT_Vector*  vec,
+                  FT_Angle    angle )
+  {
+    vec->x = FT_TRIG_COSCALE >> 2;
+    vec->y = 0;
+    ft_trig_pseudo_rotate( vec, angle );
+    vec->x >>= 12;
+    vec->y >>= 12;
+  }
+
+
+  FT_EXPORT_DEF(void)
+  FT_Vector_Rotate( FT_Vector*  vec,
+                    FT_Angle  angle )
+  {
+    FT_Int     shift;
+    FT_Vector  v;
+    
+    v.x   = vec->x;
+    v.y   = vec->y;
+    if ( angle && ( v.x != 0 || v.y != 0 ) )
+    {
+      shift = ft_trig_prenorm( &v );
+      ft_trig_pseudo_rotate( &v, angle );
+      v.x = ft_trig_downscale( v.x );
+      v.y = ft_trig_downscale( v.y );
+      
+      if ( shift >= 0 )
+      {
+        vec->x = v.x >> shift;
+        vec->y = v.y >> shift;
+      }
+      else
+      {
+        shift  = -shift;
+        vec->x = v.x << shift;
+        vec->y = v.y << shift;
+      }
+    }
+  }
+
+
+  FT_EXPORT_DEF(FT_Fixed)
+  FT_Vector_Length( FT_Vector*  vec )
+  {
+    FT_Int    shift;
+    FT_Vector v;
+    
+    v = *vec;
+
+    /* handle trivial cases */    
+    if ( v.x == 0 )
+    {
+      return ( v.y >= 0 ) ? v.y : -v.y;
+    }
+    else if ( v.y == 0 )
+    {
+      return ( v.x >= 0 ) ? v.x : -v.x;
+    }
+
+    /* general case */
+    shift = ft_trig_prenorm( &v );
+    ft_trig_pseudo_polarize( &v );
+    
+    v.x = ft_trig_downscale( v.x );
+    return ( shift >= 0 ) ? (v.x >> shift) : (v.x << -shift);
+  }  
+
+
+  FT_EXPORT_DEF(void)
+  FT_Vector_Polarize( FT_Vector*  vec,
+                      FT_Fixed   *length,
+                      FT_Angle   *angle )
+  {
+    FT_Int    shift;
+    FT_Vector v;
+    
+    v = *vec;
+    
+    if ( v.x == 0 && v.y == 0 )
+      return;
+      
+    shift = ft_trig_prenorm( &v );
+    ft_trig_pseudo_polarize( &v );
+    
+    v.x = ft_trig_downscale( v.x );
+
+    *length = ( shift >= 0 ) ? (v.x >> shift) : (v.x << -shift);
+    *angle  = v.y;
+  }                    
+
+
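
Because the implementation above is purely integer CORDIC, a quick way to
check that the arctangent table and the scale constants are consistent is to
compare its 16.16 results against the C library.  A small test harness
(illustrative only, not part of the patch; it assumes the library was built
with the new fttrigon.c):

    #include <ft2build.h>
    #include FT_FREETYPE_H
    #include FT_TRIGONOMETRY_H

    #include <math.h>
    #include <stdio.h>

    int
    main( void )
    {
      double  pi      = 3.14159265358979323846;
      double  max_err = 0.0;
      int     deg;

      for ( deg = -180; deg <= 180; deg++ )
      {
        FT_Angle  angle = (FT_Angle)( deg * 65536L );   /* degrees in 16.16 */
        double    c1    = FT_Cos( angle ) / 65536.0;    /* CORDIC result    */
        double    c2    = cos( deg * pi / 180.0 );      /* libm reference   */
        double    err   = fabs( c1 - c2 );

        if ( err > max_err )
          max_err = err;
      }

      printf( "max |FT_Cos - cos| over integer degrees: %g\n", max_err );
      return 0;
    }

A small maximum error here indicates that the arctangent table and
FT_TRIG_COSCALE agree; checking FT_Vector_Length or FT_Vector_Rotate the same
way exercises FT_TRIG_SCALE as well.
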
--- a/src/base/rules.mk
+++ b/src/base/rules.mk
@@ -33,6 +33,7 @@
 #   ftsystem, ftinit, and ftdebug are handled by freetype.mk
 #
 BASE_SRC := $(BASE_)ftcalc.c   \
+            $(BASE_)fttrigon.c \
             $(BASE_)ftextend.c \
             $(BASE_)ftlist.c   \
             $(BASE_)ftobjs.c   \
--- /dev/null
+++ b/src/tools/cordic.py
@@ -1,0 +1,78 @@
+# compute arctangent table for CORDIC computations in fttrigon.c
+import sys, math
+
+units  = 180*65536   # don't change !!
+scale  = units/math.pi
+shrink = 1.0
+comma  = ""
+
+def calc_val( x ):
+    global units, shrink
+    angle  = math.atan(x)
+    shrink = shrink * math.cos(angle)
+    return angle/math.pi * units
+
+def  print_val( n, x ):
+    global comma
+
+    lo  = int(x)
+    hi  = lo + 1
+    alo = math.atan(lo)
+    ahi = math.atan(hi)
+    ax  = math.atan(2.0**n)
+
+    errlo = abs( alo - ax )
+    errhi = abs( ahi - ax )
+    
+    if ( errlo < errhi ):
+      hi = lo
+
+    sys.stdout.write( comma + repr( int(hi) ) )
+    comma = ", "
+
+
+print ""
+print "table of arctan( 1/2^n ) for PI = " + repr(units/65536.0) + " units"
+
+# compute range of "i"
+r = [-1]
+r = r + range(32)
+
+for n in r:
+
+    if n >= 0:
+        x = 1.0/(2.0**n)    # tangent value
+    else:
+        x = 2.0**(-n)
+      
+    angle  = math.atan(x)    # arctangent 
+    angle2 = angle*scale     # arctangent in FT_Angle units
+
+    # determine which integer value for angle gives the best tangent
+    lo  = int(angle2)
+    hi  = lo + 1
+    tlo = math.tan(lo/scale)
+    thi = math.tan(hi/scale)
+
+    errlo = abs( tlo - x )
+    errhi = abs( thi - x )
+
+    angle2 = hi
+    if errlo < errhi:
+        angle2 = lo
+
+    if angle2 <= 0:
+        break
+    
+    sys.stdout.write( comma + repr( int(angle2) ) )
+    comma = ", "
+    
+    shrink = shrink * math.cos( angle2/scale)
+      
+
+print
+print "shrink factor    = " + repr( shrink )
+print "shrink factor 2  = " + repr( shrink * (2.0**32) )
+print "expansion factor = " + repr(1/shrink)
+print ""
+   
\ No newline at end of file
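
The numbers printed by this script tie directly into fttrigon.c above: the
accumulated "shrink" value is the product of cos() over every pseudo-rotation
step (the initial tangent-2 step plus the 23 right-shift steps), and
"shrink factor 2", i.e. shrink * 2^32, is what is hard-coded there as
FT_TRIG_SCALE, the factor that ft_trig_downscale multiplies back in after the
rotations.  A small C sketch of the same computation (illustrative only; the
constant actually used in the patch comes from this script, which rounds each
angle to an integer number of FT_Angle units first):

    #include <math.h>
    #include <stdio.h>

    int
    main( void )
    {
      /* one initial pseudo-rotation with tangent 2, then 23 steps */
      /* with tangents 1, 1/2, 1/4, ... (FT_TRIG_MAX_ITERS is 23)  */
      double  shrink = cos( atan( 2.0 ) );
      int     i;

      for ( i = 0; i < 23; i++ )
        shrink *= cos( atan( 1.0 / (double)( 1L << i ) ) );

      printf( "shrink        = %.16f\n", shrink );
      printf( "shrink * 2^32 = %.1f\n", shrink * 4294967296.0 );

      /* the second value should come out close to FT_TRIG_SCALE      */
      /* (1166391785); the small difference is due to the rounding of */
      /* each table angle performed by cordic.py                      */
      return 0;
    }
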
--- /dev/null
+++ b/src/tools/docmaker.py
@@ -1,0 +1,1660 @@
+#!/usr/bin/env python
+#
+#  DocMaker 0.1 (c) 2000-2001 David Turner <[email protected]>
+#
+#  DocMaker is a very simple program used to generate the API Reference
+#  of programs by extracting comments from source files, and generating
+#  the equivalent HTML documentation.
+#
+#  DocMaker is very similar to other tools like Doxygen, with the
+#  following differences:
+#
+#    - It is written in Python (so it is slow, but easy to maintain and
+#      improve).
+#
+#    - The comment syntax used by DocMaker is simpler and makes for
+#      clearer comments.
+#
+#  Of course, it doesn't have all the goodies of most similar tools,
+#  (e.g. C++ class hierarchies), but hey, it is only 2000 lines of
+#  Python.
+#
+#  DocMaker is mainly used to generate the API references of several
+#  FreeType packages.
+#
+#   - David
+#
+
+import fileinput, sys, os, time, string, glob, getopt
+
+# The Project's title.  This can be overridden from the command line with
+# the options "-t" or "--title".
+#
+project_title = "Project"
+
+# The project's filename prefix.  This can be set from the command line with
+# the options "-p" or "--prefix"
+#
+project_prefix = ""
+
+# The project's documentation output directory.  This can be set from the
+# command line with the options "-o" or "--output".
+#
+output_dir = None
+
+
+# The following defines the HTML header used by all generated pages.
+#
+html_header_1 = """\
+<html>
+<head>
+<title>"""
+
+html_header_2 = """ API Reference</title>
+<basefont face="Verdana,Geneva,Arial,Helvetica">
+<style type="text/css">
+  P { text-align: justify }
+  H1 { text-align: center }
+  LI { text-align: justify }
+</style>
+</head>
+<body text=#000000
+      bgcolor=#FFFFFF
+      link=#0000EF
+      vlink=#51188E
+      alink=#FF0000>
+<center><h1>"""
+
+html_header_3=""" API Reference</h1></center>
+"""
+
+# This is recomputed later when the project title changes.
+#
+html_header = html_header_1 + project_title + html_header_2 + project_title + html_header_3
+
+
+# The HTML footer used by all generated pages.
+#
+html_footer = """\
+</body>
+</html>"""
+
+# The header and footer used for each section.
+#
+section_title_header = "<center><h1>"
+section_title_footer = "</h1></center>"
+
+# The header and footer used for code segments.
+#
+code_header = "<font color=blue><pre>"
+code_footer = "</pre></font>"
+
+# Paragraph header and footer.
+#
+para_header = "<p>"
+para_footer = "</p>"
+
+# Block header and footer.
+#
+block_header = "<center><table width=75%><tr><td>"
+block_footer = "</td></tr></table><hr width=75%></center>"
+
+# Description header/footer.
+#
+description_header = "<center><table width=87%><tr><td>"
+description_footer = "</td></tr></table></center><br>"
+
+# Marker header/inter/footer combination.
+#
+marker_header = "<center><table width=87% cellpadding=5><tr bgcolor=#EEEEFF><td><em><b>"
+marker_inter  = "</b></em></td></tr><tr><td>"
+marker_footer = "</td></tr></table></center>"
+
+# Source code extracts header/footer.
+#
+source_header = "<center><table width=87%><tr bgcolor=#D6E8FF width=100%><td><pre>"
+source_footer = "</pre></table></center><br>"
+
+# Chapter header/inter/footer.
+#
+chapter_header = "<center><table width=75%><tr><td><h2>"
+chapter_inter  = "</h2><ul>"
+chapter_footer = "</ul></td></tr></table></center>"
+
+current_section = None
+
+
+# This function is used to sort the index.  It is a simple lexicographical
+# sort, except that it places capital letters before lowercase ones.
+#
+def index_sort( s1, s2 ):
+    if not s1:
+        return -1
+
+    if not s2:
+        return 1
+
+    l1 = len( s1 )
+    l2 = len( s2 )
+    m1 = string.lower( s1 )
+    m2 = string.lower( s2 )
+
+    for i in range( l1 ):
+        if i >= l2 or m1[i] > m2[i]:
+            return 1
+
+        if m1[i] < m2[i]:
+            return -1
+
+        if s1[i] < s2[i]:
+            return -1
+
+        if s1[i] > s2[i]:
+            return 1
+
+    if l2 > l1:
+        return -1
+
+    return 0
+
+
+# Sort input_list, placing the elements of order_list in front.
+#
+def sort_order_list( input_list, order_list ):
+    new_list = order_list[:]
+    for id in input_list:
+        if not id in order_list:
+            new_list.append( id )
+    return new_list
+
+
+# Translate a single line of source to HTML.  This will convert
+# a "<" into "&lt;", ">" into "&gt;", etc.
+#
+def html_format( line ):
+    result = string.replace( line, "<", "&lt;" )
+    result = string.replace( line, ">", "&gt;" )
+    result = string.replace( line, "&", "&amp;" )
+    return result
+
+
+# Open the standard output to a given project documentation file.  Use
+# "output_dir" to determine the filename location if necessary and save the
+# old stdout in a tuple that is returned by this function.
+#
+def open_output( filename ):
+    global output_dir
+
+    if output_dir and output_dir != "":
+        filename = output_dir + os.sep + filename
+
+    old_stdout = sys.stdout
+    new_file   = open( filename, "w" )
+    sys.stdout = new_file
+
+    return ( new_file, old_stdout )
+
+
+# Close the output that was returned by "open_output".
+#
+def close_output( output ):
+    output[0].close()
+    sys.stdout = output[1]
+
+
+# Check output directory.
+#
+def check_output( ):
+    global output_dir
+    if output_dir:
+        if output_dir != "":
+            if not os.path.isdir( output_dir ):
+                sys.stderr.write( "argument" + " '" + output_dir + "' " +
+                                  "is not a valid directory" )
+                sys.exit( 2 )
+        else:
+            output_dir = None
+
+
+def compute_time_html( ):
+    global html_footer
+    time_string = time.asctime( time.localtime( time.time() ) )
+    html_footer = '<p><center><font size="-2">generated on ' + time_string + '</font></center></p>' + html_footer
+
+# The FreeType 2 reference is extracted from the source files.  These
+# contain various comment blocks that follow one of the following formats:
+#
+#  /**************************
+#   *
+#   *  FORMAT1
+#   *
+#   *
+#   *
+#   *
+#   *************************/
+#
+#  /**************************/
+#  /*                        */
+#  /*  FORMAT2               */
+#  /*                        */
+#  /*                        */
+#  /*                        */
+#  /*                        */
+#
+#  /**************************/
+#  /*                        */
+#  /*  FORMAT3               */
+#  /*                        */
+#  /*                        */
+#  /*                        */
+#  /*                        */
+#  /**************************/
+#
+# Each block contains a list of markers; each one can be followed by
+# some arbitrary text or a list of fields.  Here is an example:
+#
+#    <Struct>
+#       MyStruct
+#
+#    <Description>
+#       this structure holds some data
+#
+#    <Fields>
+#       x :: horizontal coordinate
+#       y :: vertical coordinate
+#
+#
+# This example defines three markers: 'Struct', 'Description' & 'Fields'.
+# The first two markers contain arbitrary text, while the last one contains
+# a list of fields.
+#
+# Each field is simply of the format:  WORD :: TEXT...
+#
+# Note that typically each comment block is followed by some source code
+# declaration that may need to be kept in the reference.
+#
+# Note that markers can alternatively be written as "@MARKER:" instead of
+# "<MARKER>".  All marker identifiers are converted to lower case during
+# parsing in order to simplify sorting.
+#
+# We associate with each block the following source lines that do not begin
+# with a comment.  For example, the following:
+#
+#   /**********************************
+#    *
+#    * <mytag>  blabla
+#    *
+#    */
+#
+#   bla_bla_bla
+#   bilip_bilip
+#
+#   /* - this comment acts as a separator - */
+#
+#   blo_blo_blo
+#
+#
+# will only keep the first two lines of sources with
+# the "blabla" block.
+#
+# However, the comment will be kept, with following source lines if it
+# contains a starting '#' or '@' as in:
+#
+#   /*@.....*/
+#   /*#.....*/
+#   /* @.....*/
+#   /* #.....*/
+#
+
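
Putting the rules above together, a complete DocMaker block inside a C header
would look roughly as follows (a hypothetical example only, combining one of
the comment shapes with the marker and field syntax just described; the
structure name and fields are made up):

    /*************************************************************************/
    /*                                                                       */
    /* <Struct>                                                              */
    /*    My_Struct                                                          */
    /*                                                                       */
    /* <Description>                                                         */
    /*    This structure holds some data.                                    */
    /*                                                                       */
    /* <Fields>                                                              */
    /*    x :: The horizontal coordinate.                                    */
    /*    y :: The vertical coordinate.                                      */
    /*                                                                       */
    /*************************************************************************/
    typedef struct  My_Struct_
    {
      int  x;
      int  y;

    } My_Struct;

The "typedef" lines that follow the comment are the source lines that get
attached to the block and later shown as the source extract of the generated
page; the block itself would be named "My_Struct", after the first word of
its first marker.
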
+
+
+#############################################################################
+#
+# The DocCode class is used to store source code lines.
+#
+#   'self.lines' contains a set of source code lines that will be dumped as
+#   HTML in a <PRE> tag.
+#
+#   The object is filled line by line by the parser; it strips the leading
+#   "margin" space from each input line before storing it in 'self.lines'.
+#
+class DocCode:
+
+    def __init__( self, margin = 0 ):
+        self.lines  = []
+        self.margin = margin
+
+
+    def add( self, line ):
+        # remove margin whitespace
+        #
+        if string.strip( line[: self.margin] ) == "":
+            line = line[self.margin :]
+        self.lines.append( line )
+
+
+    def dump( self ):
+        for line in self.lines:
+            print "--" + line
+        print ""
+
+
+    def get_identifier( self ):
+        # this function should never be called
+        #
+        return "UNKNOWN_CODE_IDENTIFIER!"
+
+
+    def dump_html( self, identifiers = None ):
+        # clean the last empty lines
+        #
+        l = len( self.lines ) - 1
+        while l > 0 and string.strip( self.lines[l - 1] ) == "":
+            l = l - 1
+
+        # The code footer should be directly appended to the last code
+        # line to avoid an additional blank line.
+        #
+        sys.stdout.write( code_header )
+        for line in self.lines[0 : l+1]:
+            sys.stdout.write( '\n' + html_format(line) )
+        sys.stdout.write( code_footer )
+
+
+
+#############################################################################
+#
+# The DocParagraph is used to store text paragraphs.
+# 'self.words' is simply a list of words for the paragraph.
+#
+# The paragraph is filled line by line by the parser.
+#
+class DocParagraph:
+
+    def __init__( self ):
+        self.words = []
+
+
+    def add( self, line ):
+        # Get rid of unwanted spaces in the paragraph.
+        #
+        # The following two lines are the same as
+        #
+        #   self.words.extend( string.split( line ) )
+        #
+        # but older Python versions don't have the `extend' attribute.
+        #
+        last = len( self.words )
+        self.words[last : last] = string.split( line )
+
+
+    # This function is used to retrieve the first word of a given
+    # paragraph.
+    #
+    def get_identifier( self ):
+        if self.words:
+            return self.words[0]
+
+        # should never happen
+        #
+        return "UNKNOWN_PARA_IDENTIFIER!"
+
+
+    def get_words( self ):
+        return self.words[:]
+
+
+    def dump( self, identifiers = None ):
+        max_width = 50
+        cursor    = 0
+        line      = ""
+        extra     = None
+        alphanum  = string.lowercase + string.uppercase + string.digits + '_'
+
+        for word in self.words:
+            # process cross references if needed
+            #
+            if identifiers and word and word[0] == '@':
+                word = word[1 :]
+
+                # we need to find non-alphanumeric characters
+                #
+                l = len( word )
+                i = 0
+                while i < l and word[i] in alphanum:
+                    i = i + 1
+
+                if i < l:
+                    extra = word[i :]
+                    word  = word[0 : i]
+
+                block = identifiers.get( word )
+                if block:
+                    word = '<a href="' + block.html_address() + '">' + word + '</a>'
+                else:
+                    word = '?' + word
+
+            if cursor + len( word ) + 1 > max_width:
+                print html_format( line )
+                cursor = 0
+                line   = ""
+
+            line = line + word
+            if not extra:
+                line = line + " "
+
+            cursor = cursor + len( word ) + 1
+
+
+            # Handle trailing periods, commas, etc. at the end of cross
+            # references.
+            #
+            if extra:
+                if cursor + len( extra ) + 1 > max_width:
+                    print html_format( line )
+                    cursor = 0
+                    line   = ""
+
+                line   = line + extra + " "
+                cursor = cursor + len( extra ) + 1
+                extra  = None
+
+        if cursor > 0:
+            print html_format(line)
+
+        # print "�" # for debugging only
+
+
+    def dump_string( self ):
+        s     = ""
+        space = ""
+        for word in self.words:
+            s     = s + space + word
+            space = " "
+
+        return s
+
+
+    def dump_html( self, identifiers = None ):
+        print para_header
+        self.dump( identifiers )
+        print para_footer
+
+
+
+#############################################################################
+#
+# DocContent is used to store the content of a given marker.
+#
+# The "self.items" list contains (field,elements) records, where "field"
+# corresponds to a given structure field or function parameter (indicated
+# by a "::"), or NULL for a normal section of text/code.
+#
+# Hence, the following example:
+#
+#   <MyMarker>
+#      This is an example of what can be put in a content section,
+#
+#      A second line of example text.
+#
+#      x :: A simple test field, with some contents.
+#      y :: Even before, this field has some code contents.
+#           {
+#             y = x+2;
+#           }
+#
+# should be stored as
+#
+#     [ ( None, [ DocParagraph, DocParagraph] ),
+#       ( "x",  [ DocParagraph ] ),
+#       ( "y",  [ DocParagraph, DocCode ] ) ]
+#
+# in 'self.items'.
+#
+# The DocContent object is entirely built at creation time; you must pass a
+# list of input text lines in the "lines_list" parameter.
+#
+class DocContent:
+
+    def __init__( self, lines_list ):
+        self.items  = []
+        code_mode   = 0
+        code_margin = 0
+        text        = []
+        paragraph   = None   # represents the current DocParagraph
+        code        = None   # represents the current DocCode
+
+        elements    = []     # the list of elements for the current field;
+                             # contains DocParagraph or DocCode objects
+
+        field       = None   # the current field
+
+        for aline in lines_list:
+            if code_mode == 0:
+                line   = string.lstrip( aline )
+                l      = len( line )
+                margin = len( aline ) - l
+
+                # if the line is empty, this is the end of the current
+                # paragraph
+                #
+                if l == 0 or line == '{':
+                    if paragraph:
+                        elements.append( paragraph )
+                        paragraph = None
+
+                    if line == "":
+                        continue
+
+                    code_mode   = 1
+                    code_margin = margin
+                    code        = None
+                    continue
+
+                words = string.split( line )
+
+                # test for a field delimiter on the start of the line, i.e.
+                # the token `::'
+                #
+                if len( words ) >= 2 and words[1] == "::":
+                    # start a new field - complete current paragraph if any
+                    #
+                    if paragraph:
+                        elements.append( paragraph )
+                        paragraph = None
+
+                    # append previous "field" to self.items
+                    #
+                    self.items.append( ( field, elements ) )
+
+                    # start new field and elements list
+                    #
+                    field    = words[0]
+                    elements = []
+                    words    = words[2 :]
+
+                # append remaining words to current paragraph
+                #
+                if len( words ) > 0:
+                    line = string.join( words )
+                    if not paragraph:
+                        paragraph = DocParagraph()
+                    paragraph.add( line )
+
+            else:
+                # we are in code mode...
+                #
+                line = aline
+
+                # the code block ends with a line that has a single '}' on
+                # it, located at the same column as the opening
+                # brace...
+                #
+                if line == " " * code_margin + '}':
+                    if code:
+                        elements.append( code )
+                        code = None
+
+                    code_mode   = 0
+                    code_margin = 0
+
+                # otherwise, add the line to the current paragraph
+                #
+                else:
+                    if not code:
+                        code = DocCode()
+                    code.add( line )
+
+        if paragraph:
+            elements.append( paragraph )
+
+        if code:
+            elements.append( code )
+
+        self.items.append( ( field, elements ) )
+
+
+    def get_identifier( self ):
+        if self.items:
+            item = self.items[0]
+            for element in item[1]:
+                return element.get_identifier()
+
+        # should never happen
+        #
+        return "UNKNOWN_CONTENT_IDENTIFIER!"
+
+
+    def get_title( self ):
+        if self.items:
+            item = self.items[0]
+            for element in item[1]:
+                return element.dump_string()
+
+        # should never happen
+        #
+        return "UNKNOWN_CONTENT_TITLE!"
+
+
+    def dump( self ):
+        for item in self.items:
+            field = item[0]
+            if field:
+                print "<field " + field + ">"
+
+            for element in item[1]:
+                element.dump()
+
+            if field:
+                print "</field>"
+
+
+    def dump_html( self, identifiers = None ):
+        n        = len( self.items )
+        in_table = 0
+
+        for i in range( n ):
+            item  = self.items[i]
+            field = item[0]
+
+            if not field:
+                if in_table:
+                    print "</td></tr></table>"
+                    in_table = 0
+
+                for element in item[1]:
+                    element.dump_html( identifiers )
+
+            else:
+                if not in_table:
+                    print "<table cellpadding=4><tr valign=top><td>"
+                    in_table = 1
+                else:
+                    print "</td></tr><tr valign=top><td>"
+
+                print "<b>" + field + "</b></td><td>"
+
+                for element in item[1]:
+                    element.dump_html( identifiers )
+
+        if in_table:
+            print "</td></tr></table>"
+
+
+    def dump_html_in_table( self, identifiers = None ):
+        n        = len( self.items )
+        in_table = 0
+
+        for i in range( n ):
+            item  = self.items[i]
+            field = item[0]
+
+            if not field:
+                if item[1]:
+                    print "<tr><td colspan=2>"
+                    for element in item[1]:
+                        element.dump_html( identifiers )
+                    print "</td></tr>"
+
+            else:
+                print "<tr><td><b>" + field + "</b></td><td>"
+
+                for element in item[1]:
+                    element.dump_html( identifiers )
+
+                print "</td></tr>"
+
+
+
+#############################################################################
+#
+# The DocBlock class is used to store a given comment block.  It contains
+# a list of markers, as well as a list of contents for each marker.
+#
+#   "self.items" is a list of (marker, contents) elements, where 'marker' is
+#   a lowercase marker string, and 'contents' is a DocContent object.
+#
+#   "self.source" is simply a list of text lines taken from the uncommented
+#   source itself.
+#
+#   Finally, "self.name" is a simple identifier used to uniquely identify
+#   the block. It is taken from the first word of the first paragraph of the
+#   first marker of a given block, i.e:
+#
+#      <Type> Goo
+#      <Description> Bla bla bla
+#
+#   will have a name of "Goo"
+#
+class DocBlock:
+
+    def __init__( self, block_line_list = [], source_line_list = [] ):
+        self.items    = []               # current ( marker, contents ) list
+        self.section  = None             # section this block belongs to
+        self.filename = "unknown"        # filename defining this block
+        self.lineno   = 0                # line number in filename
+
+        marker        = None             # current marker
+        content       = []               # current content lines list
+        alphanum      = string.letters + string.digits + "_"
+        self.name     = None
+
+        for line in block_line_list:
+            line2  = string.lstrip( line )
+            l      = len( line2 )
+            margin = len( line ) - l
+
+            if l > 3:
+                ender = None
+                if line2[0] == '<':
+                    ender = '>'
+                elif line2[0] == '@':
+                    ender = ':'
+
+                if ender:
+                    i = 1
+                    while i < l and line2[i] in alphanum:
+                        i = i + 1
+                    if i < l and line2[i] == ender:
+                        if marker and content:
+                            self.add( marker, content )
+                        marker  = line2[1 : i]
+                        content = []
+                        line2   = string.lstrip( line2[i+1 :] )
+                        l       = len( line2 )
+                        line    = " " * margin + line2
+
+            content.append( line )
+
+        if marker and content:
+            self.add( marker, content )
+
+        self.source = []
+        if self.items:
+            self.source = source_line_list
+
+        # now retrieve block name when possible
+        #
+        if self.items:
+            first     = self.items[0]
+            self.name = first[1].get_identifier()
+
+
+    # This function adds a new element to 'self.items'.
+    #
+    #   'marker' is a marker string, or None.
+    #   'lines'  is a list of text lines used to compute a list of
+    #            DocContent objects.
+    #
+    def add( self, marker, lines ):
+        # remove the first and last empty lines from the content list
+        #
+        l = len( lines )
+        if l > 0:
+            i = 0
+            while l > 0 and string.strip( lines[l - 1] ) == "":
+                l = l - 1
+            while i < l and string.strip( lines[i] ) == "":
+                i = i + 1
+            lines = lines[i : l]
+            l     = len( lines )
+
+        # add a new marker only if its marker and its content list
+        # are not empty
+        #
+        if l > 0 and marker:
+            content = DocContent( lines )
+            self.items.append( ( string.lower( marker ), content ) )
+
+
+    def find_content( self, marker ):
+        for item in self.items:
+            if ( item[0] == marker ):
+                return item[1]
+        return None
+
+
+    def html_address( self ):
+        section = self.section
+        if section and section.filename:
+            return section.filename + '#' + self.name
+
+        return ""  # this block is not in a section?
+
+
+    def location( self ):
+        return self.filename + ':' + str( self.lineno )
+
+
+    def print_warning( self, message ):
+        sys.stderr.write( "WARNING:" +
+                          self.location() + ": " + message + '\n' )
+
+
+    def print_error( self, message ):
+        sys.stderr.write( "ERROR:" +
+                          self.location() + ": " + message + '\n' )
+        sys.exit()
+
+
+    def dump( self ):
+        for i in range( len( self.items ) ):
+            print "[" + self.items[i][0] + "]"
+            content = self.items[i][1]
+            content.dump()
+
+
+    def dump_html( self, identifiers = None ):
+        types      = ['type', 'struct', 'functype', 'function',
+                      'constant', 'enum', 'macro', 'structure', 'also']
+
+        parameters = ['input', 'inout', 'output', 'return']
+
+        if not self.items:
+            return
+
+        # start of a block
+        #
+        print block_header
+
+        # place html anchor if needed
+        #
+        if self.name:
+            print '<a name="' + self.name + '">'
+            print "<h4>" + self.name + "</h4>"
+            print "</a>"
+
+        # print source code
+        #
+        if not self.source:
+            print block_footer
+            return
+
+        lines = self.source
+        l     = len( lines ) - 1
+        while l >= 0 and string.strip( lines[l] ) == "":
+            l = l - 1
+        print source_header
+        print ""
+        for line in lines[0 : l+1]:
+            print line
+        print source_footer
+
+        in_table = 0
+
+        # dump each (marker,content) element
+        #
+        for element in self.items:
+            marker  = element[0]
+            content = element[1]
+
+            if marker == "description":
+                print description_header
+                content.dump_html( identifiers )
+                print description_footer
+
+            elif not ( marker in types ):
+                sys.stdout.write( marker_header )
+                sys.stdout.write( marker )
+                sys.stdout.write( marker_inter + '\n' )
+                content.dump_html( identifiers )
+                print marker_footer
+
+        print ""
+
+        print block_footer
+
+
+
+#############################################################################
+#
+# The DocSection class is used to store a given documentation section.
+#
+# Each section is made of an identifier, an abstract and a description.
+#
+# For example, look at:
+#
+#   <Section> Basic_Data_Types
+#
+#   <Title> FreeType 2 Basic Data Types
+#
+#   <Abstract>
+#      Definitions of basic FreeType data types
+#
+#   <Description>
+#      FreeType defines several basic data types for all its
+#      operations...
+#
+class DocSection:
+
+    def __init__( self, block ):
+        self.block       = block
+        self.name        = string.lower( block.name )
+        self.abstract    = block.find_content( "abstract" )
+        self.description = block.find_content( "description" )
+        self.elements    = {}
+        self.list        = []
+        self.filename    = self.name + ".html"
+        self.chapter     = None
+
+        # sys.stderr.write( "new section '" + self.name + "'" )
+
+
+    def add_element( self, block ):
+        # check that we don't have a duplicate element in this
+        # section
+        #
+        if self.elements.has_key( block.name ):
+            block.print_error( "duplicate element definition for " +
+                               "'" + block.name + "' " +
+                               "in section " +
+                               "'" + self.name + "'\n" +
+                               "previous definition in " +
+                               "'" + self.elements[block.name].location() + "'" )
+
+        self.elements[block.name] = block
+        self.list.append( block )
+
+
+    def print_warning( self, message ):
+        self.block.print_warning( message )
+
+
+    def print_error( self, message ):
+        self.block.print_error( message )
+
+
+    def dump_html( self, identifiers = None ):
+        """make an HTML page from a given DocSection"""
+
+        # print HTML header
+        #
+        print html_header
+
+        # print title
+        #
+        print section_title_header
+        print self.title
+        print section_title_footer
+
+        # print description
+        #
+        print block_header
+        self.description.dump_html( identifiers )
+        print block_footer
+
+        # print elements
+        #
+        for element in self.list:
+            element.dump_html( identifiers )
+
+        print html_footer
+
+
+class DocSectionList:
+
+    def __init__( self ):
+        self.sections        = {}    # map section names to section objects
+        self.list            = []    # list of sections (in creation order)
+        self.current_section = None  # current section
+        self.identifiers     = {}    # map identifiers to blocks
+
+
+    def append_section( self, block ):
+        name     = string.lower( block.name )
+        abstract = block.find_content( "abstract" )
+
+        if self.sections.has_key( name ):
+            # There is already a section with this name in our list.  We
+            # will try to complete it.
+            #
+            section = self.sections[name]
+            if section.abstract:
+                # This section already has an abstract defined; simply check
+                # that the new section doesn't provide a new one.
+                #
+                if abstract:
+                    section.block.print_error(
+                      "duplicate section definition for " +
+                      "'" + name + "'\n" +
+                      "previous definition in " +
+                      "'" + section.block.location() + "'\n" +
+                      "second definition in " +
+                      "'" + block.location() + "'" )
+            else:
+                # The old section didn't contain an abstract; we are now
+                # going to replace it.
+                #
+                section.abstract    = abstract
+                section.description = block.find_content( "description" )
+                section.block       = block
+
+        else:
+            # a new section
+            #
+            section = DocSection( block )
+            self.sections[name] = section
+            self.list.append( section )
+
+        self.current_section = section
+
+
+    def append_block( self, block ):
+        if block.name:
+            section = block.find_content( "section" )
+            if section:
+                self.append_section( block )
+
+            elif self.current_section:
+                self.current_section.add_element( block )
+                block.section                = self.current_section
+                self.identifiers[block.name] = block
+
+
+    def prepare_files( self, file_prefix = None ):
+        # prepare the section list, by computing section filenames and the
+        # index
+        #
+        if file_prefix:
+            prefix = file_prefix + "-"
+        else:
+            prefix = ""
+
+        # compute section names
+        #
+        for section in self.sections.values():
+            title_content     = section.block.find_content( "title" )
+            if title_content:
+                section.title = title_content.get_title()
+            else:
+                section.title = "UNKNOWN_SECTION_TITLE!"
+
+
+        # sort section elements according to the <order> marker if available
+        #
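+        # As a purely illustrative example (the names below are made up),
+        # a section block containing
+        #
+        #    <Order>
+        #       my_type
+        #       my_function
+        #
+        # emits the blocks named in the list first, in that order, and
+        # appends the remaining blocks of the section afterwards.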
+        for section in self.sections.values():
+            order = section.block.find_content( "order" )
+            if order:
+                # sys.stderr.write( "<order> found at "
+                #                   + section.block.location() + '\n' )
+                order_list = []
+                for item in order.items:
+                    for element in item[1]:
+                        words = None
+                        try:
+                            words = element.get_words()
+                        except:
+                            section.block.print_warning(
+                              "invalid content in <order> marker\n" )
+                        if words:
+                            for word in words:
+                                block = self.identifiers.get( word )
+                                if block:
+                                    if block.section == section:
+                                        order_list.append( block )
+                                    else:
+                                        section.block.print_warning(
+                                          "invalid reference to " +
+                                          "'" + word + "' " +
+                                          "defined in other section" )
+                                else:
+                                    section.block.print_warning(
+                                      "invalid reference to " +
+                                      "'" + word + "'" )
+
+                # now sort the list of blocks according to the order list
+                #
+                new_list = order_list[:]
+                for block in section.list:
+                    if not block in order_list:
+                        new_list.append( block )
+
+                section.list = new_list
+
+        # compute section filenames
+        #
+        for section in self.sections.values():
+            section.filename = prefix + section.name + ".html"
+
+        self.toc_filename   = prefix + "toc.html"
+        self.index_filename = prefix + "index.html"
+
+        # compute the sorted list of identifiers for the index
+        #
+        self.index = self.identifiers.keys()
+        self.index.sort( index_sort )
+
+
+    def dump_html_sections( self ):
+        for section in self.sections.values():
+            if section.filename:
+                output = open_output( section.filename )
+
+                section.dump_html( self.identifiers )
+
+                close_output( output )
+
+
+    def dump_html_index( self ):
+        output = open_output( self.index_filename )
+
+        num_columns = 3
+        total       = len( self.index )
+        line        = 0
+
+        print html_header
+        print "<center><h1>General Index</h1></center>"
+        print "<center><table cellpadding=5><tr valign=top><td>"
+
+        for ident in self.index:
+            block = self.identifiers[ident]
+            if block:
+                sys.stdout.write( '<a href="' + block.html_address() + '">' )
+                sys.stdout.write( block.name )
+                sys.stdout.write( '</a><br>' + '\n' )
+
+                if line * num_columns >= total:
+                    print "</td><td>"
+                    line = 0
+                else:
+                    line = line + 1
+            else:
+                sys.stderr.write( "identifier '" + ident +
+                                  "' has no definition" + '\n' )
+
+        print "</tr></table></center>"
+        print html_footer
+
+        close_output( output )
+
+
+
+#############################################################################
+#
+# The DocChapter class is used to store a given documentation chapter.
+#
+# Each chapter is made of a title and an ordered list of section names.
+#
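+# For example (the names below are purely illustrative):
+#
+#   <Chapter> basic_api
+#
+#   <Title> FreeType 2 Basic API
+#
+#   <Sections>
+#      basic_data_types
+#      base_interface
+#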
+class DocChapter:
+
+    def __init__( self, block ):
+        self.sections_names = []    # ordered list of section names
+        self.sections       = []    # ordered list of DocSection objects
+                                    # for this chapter
+        self.block          = block
+
+        # look for chapter title
+        content = block.find_content( "title" )
+        if content:
+            self.title = content.get_title()
+        else:
+            self.title = "UNKNOWN CHAPTER TITLE"
+
+        # look for section list
+        content = block.find_content( "sections" )
+        if not content:
+            block.print_error( "chapter has no <sections> content" )
+
+        # compute list of section names
+        slist = []
+        for item in content.items:
+            for element in item[1]:
+                try:
+                    words        = element.get_words()
+                    l            = len( slist )
+                    slist[l : l] = words
+                except:
+                    block.print_warning(
+                      "invalid content in <sections> marker" )
+
+        self.section_names = slist
+
+
+class DocDocument:
+
+    def __init__( self ):
+        self.section_list  = DocSectionList()   # section list object
+        self.chapters      = []                 # list of chapters
+        self.lost_sections = []                 # list of sections with
+                                                # no chapter
+
+    def append_block( self, block ):
+        if block.name:
+            content = block.find_content( "chapter" )
+            if content:
+                # a chapter definition -- add it to our list
+                #
+                chapter = DocChapter( block )
+                self.chapters.append( chapter )
+            else:
+                self.section_list.append_block( block )
+
+
+    def prepare_chapters( self ):
+        # check section names
+        #
+        for chapter in self.chapters:
+            slist = []
+            for name in chapter.section_names:
+                 section = self.section_list.sections.get( name )
+                 if not section:
+                     chapter.block.print_warning(
+                       "invalid reference to unknown section '" + name + "'" )
+                 else:
+                     section.chapter = chapter
+                     slist.append( section )
+
+            chapter.sections = slist
+
+        for section in self.section_list.list:
+            if not section.chapter:
+                section.block.print_warning(
+                  "section '" + section.name + "' is not in any chapter" )
+                self.lost_sections.append( section )
+
+
+    def prepare_files( self, file_prefix = None ):
+        self.section_list.prepare_files( file_prefix )
+        self.prepare_chapters()
+
+
+    def dump_toc_html( self ):
+        # dump an html table of contents
+        #
+        output = open_output( self.section_list.toc_filename )
+
+        print html_header
+
+        print "<center><h1>Table of Contents</h1></center>"
+
+        for chapter in self.chapters:
+            print chapter_header + chapter.title + chapter_inter
+
+            print "<table cellpadding=5>"
+            for section in chapter.sections:
+                if section.abstract:
+                    print "<tr valign=top><td>"
+                    sys.stdout.write( '<a href="' + section.filename + '">' )
+                    sys.stdout.write( section.title )
+                    sys.stdout.write( "</a></td><td>" + '\n' )
+                    section.abstract.dump_html( self.section_list.identifiers )
+                    print "</td></tr>"
+
+            print "</table>"
+
+            print chapter_footer
+
+        # list lost sections
+        #
+        if self.lost_sections:
+            print chapter_header + "OTHER SECTIONS:" + chapter_inter
+
+            print "<table cellpadding=5>"
+            for section in self.lost_sections:
+                if section.abstract:
+                    print "<tr valign=top><td>"
+                    sys.stdout.write( '<a href="' + section.filename + '">' )
+                    sys.stdout.write( section.title )
+                    sys.stdout.write( "</a></td><td>" + '\n' )
+                    section.abstract.dump_html( self.section_list.identifiers )
+                    print "</td></tr>"
+
+            print "</table>"
+
+            print chapter_footer
+
+        # index
+        #
+        print chapter_header + '<a href="' + self.section_list.index_filename + '">Index</a>' + chapter_footer
+
+        print html_footer
+
+        close_output( output )
+
+
+    def dump_index_html( self ):
+        self.section_list.dump_html_index()
+
+
+    def dump_sections_html( self ):
+        self.section_list.dump_html_sections()
+
+
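+# Filter a given list of DocBlocks.  Returns a new list of DocBlock objects
+# that only contains elements whose "type" (i.e. first marker) is in the
+# "types" parameter.
+#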
+def filter_blocks_by_type( block_list, types ):
+    new_list = []
+    for block in block_list:
+        if block.items:
+            element = block.items[0]
+            marker  = element[0]
+            if marker in types:
+                new_list.append( block )
+
+    return new_list
+
+
+def filter_section_blocks( block ):
+    return block.section != None
+
+
+# Perform a lexicographical comparison of two DocBlock objects.  Returns -1,
+# 0 or 1.
+#
+def block_lexicographical_compare( b1, b2 ):
+    if not b1.name:
+        return -1
+    if not b2.name:
+        return 1
+
+    id1 = string.lower( b1.name )
+    id2 = string.lower( b2.name )
+
+    if id1 < id2:
+        return -1
+    elif id1 == id2:
+        return 0
+    else:
+        return 1
+
+
+# Dump a block list as a single HTML page.
+#
+def dump_html_1( block_list ):
+    print html_header
+
+    for block in block_list:
+        block.dump_html()
+
+    print html_footer
+
+
+def file_exists( pathname ):
+    result = 1
+    try:
+        file = open( pathname, "r" )
+        file.close()
+    except:
+        result = None
+
+    return result
+
+
+def add_new_block( list, filename, lineno, block_lines, source_lines ):
+    """add a new block to the list"""
+    block          = DocBlock( block_lines, source_lines )
+    block.filename = filename
+    block.lineno   = lineno
+    list.append( block )
+
+
+def make_block_list( args = None ):
+    """parse a file and extract comments blocks from it"""
+
+    file_list = []
+    # sys.stderr.write( repr( sys.argv[1 :] ) + '\n' )
+
+    if not args:
+        args = sys.argv[1 :]
+
+    for pathname in args:
+        if string.find( pathname, '*' ) >= 0:
+            newpath = glob.glob( pathname )
+            newpath.sort()  # sort the globbed files -- the order in which
+                            # files are parsed affects the output
+        else:
+            newpath = [pathname]
+
+        last = len( file_list )
+        file_list[last : last] = newpath
+
+    if len( file_list ) == 0:
+        file_list = None
+    else:
+        # now filter the file list to remove non-existing ones
+        file_list = filter( file_exists, file_list )
+
+    list   = []
+    block  = []
+    format = 0
+    lineno = 0
+
+    # We use "format" to store the state of our parser:
+    #
+    #  0 - wait for beginning of comment
+    #  1 - parse comment format 1
+    #  2 - parse comment format 2
+    #
+    #  4 - wait for beginning of source (or comment?)
+    #  5 - process source
+    #
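+    # As a purely illustrative sketch (not copied from the FreeType
+    # sources), the two comment layouts accepted by this state machine
+    # look like this:
+    #
+    #   Format 1:
+    #
+    #     /**************************************
+    #      *
+    #      *  <Marker>  some content...
+    #      *
+    #      **************************************/
+    #
+    #   Format 2:
+    #
+    #     /**************************************/
+    #     /*  <Marker>  some content...         */
+    #     /**************************************/
+    #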
+    comment = []
+    source  = []
+    state   = 0
+
+    fileinput.close()
+    for line in fileinput.input( file_list ):
+        l = len( line )
+        if l > 0 and line[l - 1] == '\012':
+            line = line[0 : l-1]
+
+        # stripped version of the line
+        #
+        line2 = string.strip( line )
+        l     = len( line2 )
+
+        # if this line begins with a comment and we are processing some
+        # source, exit to state 0
+        #
+        # unless we encounter something like:
+        #
+        #    /*@.....
+        #    /*#.....
+        #
+        #    /* @.....
+        #    /* #.....
+        #
+        if format >= 4 and l > 2 and line2[0 : 2] == '/*':
+            if l < 4 or ( line2[2] != '@' and line2[2 : 4] != ' @' and
+                          line2[2] != '#' and line2[2 : 4] != ' #'):
+                add_new_block( list, fileinput.filename(),
+                               lineno, block, source )
+                format = 0
+
+        if format == 0:  #### wait for beginning of comment ####
+            if l > 3 and line2[0 : 3] == '/**':
+                i = 3
+                while i < l and line2[i] == '*':
+                    i = i + 1
+
+                if i == l:
+                    # this is '/**' followed by any number of '*', the
+                    # beginning of a Format 1 block
+                    #
+                    block  = []
+                    source = []
+                    format = 1
+                    lineno = fileinput.filelineno()
+
+                elif i == l - 1 and line2[i] == '/':
+                    # this is '/**' followed by any number of '*', followed
+                    # by a '/', i.e. the beginning of a Format 2 or 3 block
+                    #
+                    block  = []
+                    source = []
+                    format = 2
+                    lineno = fileinput.filelineno()
+
+        ##############################################################
+        #
+        # FORMAT 1
+        #
+        elif format == 1:
+
+            # If the line doesn't begin with a "*", something went wrong:
+            # go back to state 0 and discard the current block.
+            #
+            if l == 0 or line2[0] != '*':
+                block  = []
+                format = 0
+
+            # Otherwise, we test for an end of block, which is an arbitrary
+            # number of '*', followed by '/'.
+            #
+            else:
+                i = 1
+                while i < l and line2[i] == '*':
+                    i = i + 1
+
+                # test for the end of the block
+                #
+                if i < l and line2[i] == '/':
+                    if block != []:
+                        format = 4
+                    else:
+                        format = 0
+                else:
+                    # otherwise simply append line to current block
+                    #
+                    block.append( line2[i :] )
+
+                continue
+
+        ##############################################################
+        #
+        # FORMAT 2
+        #
+        elif format == 2:
+
+            # If the line doesn't begin with '/*' or doesn't end with '*/',
+            # this is the end of the Format 2 block.
+            #
+            if l < 4 or line2[: 2] != '/*' or line2[-2 :] != '*/':
+                if block != []:
+                    format = 4
+                else:
+                    format = 0
+            else:
+                # remove the start and end comment delimiters, then
+                # right-strip the line
+                #
+                line2 = string.rstrip( line2[2 : -2] )
+
+                # check for end of a format2 block, i.e. a run of '*'
+                #
+                if string.count( line2, '*' ) == l - 4:
+                    if block != []:
+                        format = 4
+                    else:
+                        format = 0
+                else:
+                    # otherwise, add the line to the current block
+                    #
+                    block.append( line2 )
+
+                continue
+
+        if format >= 4:  #### source processing ####
+            if l > 0:
+                format = 5
+
+            if format == 5:
+                source.append( line )
+
+    if format >= 4:
+        add_new_block( list, fileinput.filename(), lineno, block, source )
+
+    return list
+
+
+
+# This function is only used for debugging
+#
+def dump_block_list( list ):
+    """dump a comment block list"""
+    for block in list:
+        print "----------------------------------------"
+        for line in block[0]:
+            print line
+        for line in block[1]:
+            print line
+
+    print "---------the end-----------------------"
+
+
+def usage():
+    print "\nDocMaker 0.1 Usage information\n"
+    print "  docmaker [options] file1 [ file2 ... ]\n"
+    print "using the following options:\n"
+    print "  -h : print this page"
+    print "  -t : set project title, as in '-t \"My Project\"'"
+    print "  -o : set output directory, as in '-o mydir'"
+    print "  -p : set documentation prefix, as in '-p ft2'"
+    print ""
+    print "  --title  : same as -t, as in '--title=\"My Project\"'"
+    print "  --output : same as -o, as in '--output=mydir'"
+    print "  --prefix : same as -p, as in '--prefix=ft2'"
+
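+# A typical invocation (the paths below are illustrative only) would be:
+#
+#   python docmaker.py -t "FreeType 2" -p ft2 -o docs include/freetype/*.h
+#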
+
+def main( argv ):
+    """main program loop"""
+
+    global output_dir, project_title, project_prefix
+    global html_header, html_header_1, html_header_2, html_header_3
+
+    try:
+        opts, args = getopt.getopt( sys.argv[1:],
+                                    "ht:o:p:",
+                                    [ "help", "title=", "output=", "prefix=" ] )
+
+    except getopt.GetoptError:
+        usage()
+        sys.exit( 2 )
+
+    if args == []:
+        usage()
+        sys.exit( 1 )
+
+    # process options
+    #
+    project_title  = "Project"
+    project_prefix = None
+    output_dir     = None
+
+    for opt in opts:
+        if opt[0] in ( "-h", "--help" ):
+            usage()
+            sys.exit( 0 )
+
+        if opt[0] in ( "-t", "--title" ):
+            project_title = opt[1]
+
+        if opt[0] in ( "-o", "--output" ):
+            output_dir = opt[1]
+
+        if opt[0] in ( "-p", "--prefix" ):
+            project_prefix = opt[1]
+
+    html_header = html_header_1 + project_title + \
+                  html_header_2 + project_title + html_header_3
+    check_output( )
+    compute_time_html()
+
+    # we begin by simply building a list of DocBlock elements
+    #
+    list = make_block_list( args )
+
+    # now, sort the blocks into sections
+    #
+    document = DocDocument()
+    for block in list:
+        document.append_block( block )
+
+    document.prepare_files( project_prefix )
+
+    document.dump_toc_html()
+    document.dump_sections_html()
+    document.dump_index_html()
+
+# if called from the command line
+#
+if __name__ == '__main__':
+    main( sys.argv )
+
+
+# eof
--- /dev/null
+++ b/src/tools/glnames.py
@@ -1,0 +1,1706 @@
+#!/usr/bin/env python
+#
+
+#
+# FreeType 2 glyph name builder
+#
+
+
+# Copyright 1996-2000 by
+# David Turner, Robert Wilhelm, and Werner Lemberg.
+#
+# This file is part of the FreeType project, and may only be used, modified,
+# and distributed under the terms of the FreeType project license,
+# LICENSE.TXT.  By continuing to use, modify, or distribute this file you
+# indicate that you have read the license and understand and accept it
+# fully.
+
+
+"""\
+
+usage: %s <output-file>
+
+  This very simple python script is used to generate the glyph names
+  tables defined in the PSNames module.
+
+  Its single argument is the name of the header file to be created.
+"""
+
+
+import sys, string
+
+
+# This table gives the standard Macintosh glyph names, in the order
+# defined by the Apple specification.  It is used by the TrueType
+# `post' (Postscript names) table.
+#
+mac_standard_names = \
+[
+  # 0
+  ".notdef", ".null", "CR", "space", "exclam",
+  "quotedbl", "numbersign", "dollar", "percent", "ampersand",
+
+  # 10
+  "quotesingle", "parenleft", "parenright", "asterisk", "plus",
+  "comma", "hyphen", "period", "slash", "zero",
+
+  # 20
+  "one", "two", "three", "four", "five",
+  "six", "seven", "eight", "nine", "colon",
+
+  # 30
+  "semicolon", "less", "equal", "greater", "question",
+  "at", "A", "B", "C", "D",
+
+  # 40
+  "E", "F", "G", "H", "I",
+  "J", "K", "L", "M", "N",
+
+  # 50
+  "O", "P", "Q", "R", "S",
+  "T", "U", "V", "W", "X",
+
+  # 60
+  "Y", "Z", "bracketleft", "backslash", "bracketright",
+  "asciicircum", "underscore", "grave", "a", "b",
+
+  # 70
+  "c", "d", "e", "f", "g",
+  "h", "i", "j", "k", "l",
+
+  # 80
+  "m", "n", "o", "p", "q",
+  "r", "s", "t", "u", "v",
+
+  # 90
+  "w", "x", "y", "z", "braceleft",
+  "bar", "braceright", "asciitilde", "Adieresis", "Aring",
+
+  # 100
+  "Ccedilla", "Eacute", "Ntilde", "Odieresis", "Udieresis",
+  "aacute", "agrave", "acircumflex", "adieresis", "atilde",
+
+  # 110
+  "aring", "ccedilla", "eacute", "egrave", "ecircumflex",
+  "edieresis", "iacute", "igrave", "icircumflex", "idieresis",
+
+  # 120
+  "ntilde", "oacute", "ograve", "ocircumflex", "odieresis",
+  "otilde", "uacute", "ugrave", "ucircumflex", "udieresis",
+
+  # 130
+  "dagger", "degree", "cent", "sterling", "section",
+  "bullet", "paragraph", "germandbls", "registered", "copyright",
+
+  # 140
+  "trademark", "acute", "dieresis", "notequal", "AE",
+  "Oslash", "infinity", "plusminus", "lessequal", "greaterequal",
+
+  # 150
+  "yen", "mu", "partialdiff", "summation", "product",
+  "pi", "integral", "ordfeminine", "ordmasculine", "Omega",
+
+  # 160
+  "ae", "oslash", "questiondown", "exclamdown", "logicalnot",
+  "radical", "florin", "approxequal", "Delta", "guillemotleft",
+
+  # 170
+  "guillemotright", "ellipsis", "nbspace", "Agrave", "Atilde",
+  "Otilde", "OE", "oe", "endash", "emdash",
+
+  # 180
+  "quotedblleft", "quotedblright", "quoteleft", "quoteright", "divide",
+  "lozenge", "ydieresis", "Ydieresis", "fraction", "currency",
+
+  # 190
+  "guilsinglleft", "guilsinglright", "fi", "fl", "daggerdbl",
+  "periodcentered", "quotesinglbase", "quotedblbase", "perthousand",
+    "Acircumflex",
+
+  # 200
+  "Ecircumflex", "Aacute", "Edieresis", "Egrave", "Iacute",
+  "Icircumflex", "Idieresis", "Igrave", "Oacute", "Ocircumflex",
+
+  # 210
+  "apple", "Ograve", "Uacute", "Ucircumflex", "Ugrave",
+  "dotlessi", "circumflex", "tilde", "macron", "breve",
+
+  # 220
+  "dotaccent", "ring", "cedilla", "hungarumlaut", "ogonek",
+  "caron", "Lslash", "lslash", "Scaron", "scaron",
+
+  # 230
+  "Zcaron", "zcaron", "brokenbar", "Eth", "eth",
+  "Yacute", "yacute", "Thorn", "thorn", "minus",
+
+  # 240
+  "multiply", "onesuperior", "twosuperior", "threesuperior", "onehalf",
+  "onequarter", "threequarters", "franc", "Gbreve", "gbreve",
+
+  # 250
+  "Idot", "Scedilla", "scedilla", "Cacute", "cacute",
+  "Ccaron", "ccaron", "dmacron"
+]
+
+
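+# The list of Postscript standard strings; index `n' gives the string
+# with SID `n' (see Appendix A of the CFF specification).  Note that the
+# last few entries are font keywords rather than glyph names.
+#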
+t1_standard_strings = \
+[
+  # 0
+  ".notdef", "space", "exclam", "quotedbl", "numbersign",
+  "dollar", "percent", "ampersand", "quoteright", "parenleft",
+
+  # 10
+  "parenright", "asterisk", "plus", "comma", "hyphen",
+  "period", "slash", "zero", "one", "two",
+
+  # 20
+  "three", "four", "five", "six", "seven",
+  "eight", "nine", "colon", "semicolon", "less",
+
+  # 30
+  "equal", "greater", "question", "at", "A",
+  "B", "C", "D", "E", "F",
+
+  # 40
+  "G", "H", "I", "J", "K",
+  "L", "M", "N", "O", "P",
+
+  # 50
+  "Q", "R", "S", "T", "U",
+  "V", "W", "X", "Y", "Z",
+
+  # 60
+  "bracketleft", "backslash", "bracketright", "asciicircum", "underscore",
+  "quoteleft", "a", "b", "c", "d",
+
+  # 70
+  "e", "f", "g", "h", "i",
+  "j", "k", "l", "m", "n",
+
+  # 80
+  "o", "p", "q", "r", "s",
+  "t", "u", "v", "w", "x",
+
+  # 90
+  "y", "z", "braceleft", "bar", "braceright",
+  "asciitilde", "exclamdown", "cent", "sterling", "fraction",
+
+  # 100
+  "yen", "florin", "section", "currency", "quotesingle",
+  "quotedblleft", "guillemotleft", "guilsinglleft", "guilsinglright", "fi",
+
+  # 110
+  "fl", "endash", "dagger", "daggerdbl", "periodcenter",
+  "paragraph", "bullet", "quotesinglbase", "quotedblbase", "quotedblright",
+
+  # 120
+  "guillemotright", "ellipsis", "perthousand", "questiondown", "grave",
+  "acute", "circumflex", "tilde", "macron", "breve",
+
+  # 130
+  "dotaccent", "dieresis", "ring", "cedilla", "hungarumlaut",
+  "ogonek", "caron", "emdash", "AE", "ordfeminine",
+
+  # 140
+  "Lslash", "Oslash", "OE", "ordmasculine", "ae",
+  "dotlessi", "Islash", "oslash", "oe", "germandbls",
+
+  # 150
+  "onesuperior", "logicalnot", "mu", "trademark", "Eth",
+  "onehalf", "plusminus", "Thorn", "onequarter", "divide",
+
+  # 160
+  "brokenbar", "degree", "thorn", "threequarters", "twosuperior",
+  "registered", "minus", "eth", "multiply", "threesuperior",
+
+  # 170
+  "copyright", "Aacute", "Acircumflex", "Adieresis", "Agrave",
+  "Aring", "Atilde", "Ccedilla", "Eacute", "Ecircumflex",
+
+  # 180
+  "Edieresis", "Egrave", "Iacute", "Icircumflex", "Idieresis",
+  "Igrave", "Ntilde", "Oacute", "Ocircumflex", "Odieresis",
+
+  # 190
+  "Ograve", "Otilde", "Scaron", "Uacute", "Ucircumflex",
+  "Udieresis", "Ugrave", "Yacute", "Ydieresis", "Zcaron",
+
+  # 200
+  "aacute", "acircumflex", "adieresis", "agrave", "aring",
+  "atilde", "ccedilla", "eacute", "ecircumflex", "edieresis",
+
+  # 210
+  "egrave", "iacute", "icircumflex", "idieresis", "igrave",
+  "ntilde", "oacute", "ocircumflex", "odieresis", "ograve",
+
+  # 220
+  "otilde", "scaron", "uacute", "ucircumflex", "udieresis",
+  "ugrave", "yacute", "ydieresis", "zcaron", "exclamsmall",
+
+  # 230
+  "Hungarumlautsmall", "dollaroldstyle", "dollarsuperior", "ampersandsmall",
+    "Acutesmall",
+  "parenleftsuperior", "parenrightsuperior", "twodotenleader",
+    "onedotenleader", "zerooldstyle",
+
+  # 240
+  "oneoldstyle", "twooldstyle", "threeoldstyle", "fouroldstyle",
+    "fiveoldstyle",
+  "sixoldstyle", "sevenoldstyle", "eightoldstyle", "nineoldstyle",
+    "commasuperior",
+
+  # 250
+  "threequartersemdash", "periodsuperior", "questionsmall", "asuperior",
+    "bsuperior",
+  "centsuperior", "dsuperior", "esuperior", "isuperior", "lsuperior",
+
+  # 260
+  "msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior",
+  "tsuperior", "ff", "ffi", "ffl", "parenleftinferior",
+
+  # 270
+  "parenrightinferior", "Circumflexsmall", "hyphensuperior", "Gravesmall",
+    "Asmall",
+  "Bsmall", "Csmall", "Dsmall", "Esmall", "Fsmall",
+
+  # 280
+  "Gsmall", "Hsmall", "Ismall", "Jsmall", "Ksmall",
+  "Lsmall", "Msmall", "Nsmall", "Osmall", "Psmall",
+
+  # 290
+  "Qsmall", "Rsmall", "Ssmall", "Tsmall", "Usmall",
+  "Vsmall", "Wsmall", "Xsmall", "Ysmall", "Zsmall",
+
+  # 300
+  "colonmonetary", "onefitted", "rupiah", "Tildesmall", "exclamdownsmall",
+  "centoldstyle", "Lslashsmall", "Scaronsmall", "Zcaronsmall",
+    "Dieresissmall",
+
+  # 310
+  "Brevesmall", "Caronsmall", "Dotaccentsmall", "Macronsmall", "figuredash",
+  "hypheninferior", "Ogoneksmall", "Ringsmall", "Cedillasmall",
+    "questiondownsmall",
+
+  # 320
+  "oneeighth", "threeeighths", "fiveeighths", "seveneighths", "onethird",
+  "twothirds", "zerosuperior", "foursuperior", "fivesuperior",
+    "sixsuperior",
+
+  # 330
+  "sevensuperior", "eightsuperior", "ninesuperior", "zeroinferior",
+    "oneinferior",
+  "twoinferior", "threeinferior", "fourinferior", "fiveinferior",
+    "sixinferior",
+
+  # 340
+  "seveninferior", "eightinferior", "nineinferior", "centinferior",
+    "dollarinferior",
+  "periodinferior", "commainferior", "Agravesmall", "Aacutesmall",
+    "Acircumflexsmall",
+
+  # 350
+  "Atildesmall", "Adieresissmall", "Aringsmall", "AEsmall", "Ccedillasmall",
+  "Egravesmall", "Eacutesmall", "Ecircumflexsmall", "Edieresissmall",
+    "Igravesmall",
+
+  # 360
+  "Iacutesmall", "Icircumflexsmall", "Idieresissmall", "Ethsmall",
+    "Ntildesmall",
+  "Ogravesmall", "Oacutesmall", "Ocircumflexsmall", "Otildesmall",
+    "Odieresissmall",
+
+  # 370
+  "OEsmall", "Oslashsmall", "Ugravesmall", "Uacautesmall",
+    "Ucircumflexsmall",
+  "Udieresissmall", "Yacutesmall", "Thornsmall", "Ydieresissmall",
+    "001.000",
+
+  # 380
+  "001.001", "001.002", "001.003", "Black", "Bold",
+  "Book", "Light", "Medium", "Regular", "Roman",
+
+  # 390
+  "Semibold"
+]
+
+
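+# The two tables below give the standard and expert encodings of the
+# Type 1 format: each maps the 256 character codes to indices into
+# `t1_standard_strings' above (0 meaning `.notdef').
+#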
+t1_standard_encoding = \
+[
+    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
+    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
+    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
+    0,   0,   1,   2,   3,   4,   5,   6,   7,   8,
+    9,  10,  11,  12,  13,  14,  15,  16,  17,  18,
+
+   19,  20,  21,  22,  23,  24,  25,  26,  27,  28,
+   29,  30,  31,  32,  33,  34,  35,  36,  37,  38,
+   39,  40,  41,  42,  43,  44,  45,  46,  47,  48,
+   49,  50,  51,  52,  53,  54,  55,  56,  57,  58,
+   59,  60,  61,  62,  63,  64,  65,  66,  67,  68,
+
+   69,  70,  71,  72,  73,  74,  75,  76,  77,  78,
+   79,  80,  81,  82,  83,  84,  85,  86,  87,  88,
+   89,  90,  91,  92,  93,  94,  95,   0,   0,   0,
+    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
+    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
+
+    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
+    0,  96,  97,  98,  99, 100, 101, 102, 103, 104,
+  105, 106, 107, 108, 109, 110,   0, 111, 112, 113,
+  114,   0, 115, 116, 117, 118, 119, 120, 121, 122,
+    0, 123,   0, 124, 125, 126, 127, 128, 129, 130,
+
+  131,   0, 132, 133,   0, 134, 135, 136, 137,   0,
+    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
+    0,   0,   0,   0,   0, 138,   0, 139,   0,   0,
+    0,   0, 140, 141, 142, 143,   0,   0,   0,   0,
+    0, 144,   0,   0,   0, 145,   0,   0, 146, 147,
+
+  148, 149,   0,   0,   0,   0
+]
+
+
+t1_expert_encoding = \
+[
+    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
+    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
+    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
+    0,   0,   1, 229, 230,   0, 231, 232, 233, 234,
+  235, 236, 237, 238,  13,  14,  15,  99, 239, 240,
+
+  241, 242, 243, 244, 245, 246, 247, 248,  27,  28,
+  249, 250, 251, 252,   0, 253, 254, 255, 256, 257,
+    0,   0,   0, 258,   0,   0, 259, 260, 261, 262,
+    0,   0, 263, 264, 265,   0, 266, 109, 110, 267,
+  268, 269,   0, 270, 271, 272, 273, 274, 275, 276,
+
+  277, 278, 279, 280, 281, 282, 283, 284, 285, 286,
+  287, 288, 289, 290, 291, 292, 293, 294, 295, 296,
+  297, 298, 299, 300, 301, 302, 303,   0,   0,   0,
+    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
+    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
+
+    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
+    0, 304, 305, 306,   0,   0, 307, 308, 309, 310,
+  311,   0, 312,   0,   0, 312,   0,   0, 314, 315,
+    0,   0, 316, 317, 318,   0,   0,   0, 158, 155,
+  163, 319, 320, 321, 322, 323, 324, 325,   0,   0,
+
+  326, 150, 164, 169, 327, 328, 329, 330, 331, 332,
+  333, 334, 335, 336, 337, 338, 339, 340, 341, 342,
+  343, 344, 345, 346, 347, 348, 349, 350, 351, 352,
+  353, 354, 355, 356, 357, 358, 359, 360, 361, 362,
+  363, 364, 365, 366, 367, 368, 369, 370, 371, 372,
+
+  373, 374, 375, 376, 377, 378
+]
+
+
+# This data has been taken literally from the file `glyphlist.txt',
+# version 1.2, 22 Oct 1998.  It is available from
+#
+#   http://partners.adobe.com/asn/developer/typeforum/unicodegn.html
+#
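+# Each record in the list is a semicolon-separated line of the form
+#
+#   <Unicode value>;<glyph name>;<Unicode character name>[;<note>]
+#
+# where the optional note is typically `Duplicate'.
+#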
+adobe_glyph_list = """\
+0041;A;LATIN CAPITAL LETTER A
+00C6;AE;LATIN CAPITAL LETTER AE
+01FC;AEacute;LATIN CAPITAL LETTER AE WITH ACUTE
+F7E6;AEsmall;LATIN SMALL CAPITAL LETTER AE
+00C1;Aacute;LATIN CAPITAL LETTER A WITH ACUTE
+F7E1;Aacutesmall;LATIN SMALL CAPITAL LETTER A WITH ACUTE
+0102;Abreve;LATIN CAPITAL LETTER A WITH BREVE
+00C2;Acircumflex;LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+F7E2;Acircumflexsmall;LATIN SMALL CAPITAL LETTER A WITH CIRCUMFLEX
+F6C9;Acute;CAPITAL ACUTE ACCENT
+F7B4;Acutesmall;SMALL CAPITAL ACUTE ACCENT
+00C4;Adieresis;LATIN CAPITAL LETTER A WITH DIAERESIS
+F7E4;Adieresissmall;LATIN SMALL CAPITAL LETTER A WITH DIAERESIS
+00C0;Agrave;LATIN CAPITAL LETTER A WITH GRAVE
+F7E0;Agravesmall;LATIN SMALL CAPITAL LETTER A WITH GRAVE
+0391;Alpha;GREEK CAPITAL LETTER ALPHA
+0386;Alphatonos;GREEK CAPITAL LETTER ALPHA WITH TONOS
+0100;Amacron;LATIN CAPITAL LETTER A WITH MACRON
+0104;Aogonek;LATIN CAPITAL LETTER A WITH OGONEK
+00C5;Aring;LATIN CAPITAL LETTER A WITH RING ABOVE
+01FA;Aringacute;LATIN CAPITAL LETTER A WITH RING ABOVE AND ACUTE
+F7E5;Aringsmall;LATIN SMALL CAPITAL LETTER A WITH RING ABOVE
+F761;Asmall;LATIN SMALL CAPITAL LETTER A
+00C3;Atilde;LATIN CAPITAL LETTER A WITH TILDE
+F7E3;Atildesmall;LATIN SMALL CAPITAL LETTER A WITH TILDE
+0042;B;LATIN CAPITAL LETTER B
+0392;Beta;GREEK CAPITAL LETTER BETA
+F6F4;Brevesmall;SMALL CAPITAL BREVE
+F762;Bsmall;LATIN SMALL CAPITAL LETTER B
+0043;C;LATIN CAPITAL LETTER C
+0106;Cacute;LATIN CAPITAL LETTER C WITH ACUTE
+F6CA;Caron;CAPITAL CARON
+F6F5;Caronsmall;SMALL CAPITAL CARON
+010C;Ccaron;LATIN CAPITAL LETTER C WITH CARON
+00C7;Ccedilla;LATIN CAPITAL LETTER C WITH CEDILLA
+F7E7;Ccedillasmall;LATIN SMALL CAPITAL LETTER C WITH CEDILLA
+0108;Ccircumflex;LATIN CAPITAL LETTER C WITH CIRCUMFLEX
+010A;Cdotaccent;LATIN CAPITAL LETTER C WITH DOT ABOVE
+F7B8;Cedillasmall;SMALL CAPITAL CEDILLA
+03A7;Chi;GREEK CAPITAL LETTER CHI
+F6F6;Circumflexsmall;SMALL CAPITAL MODIFIER LETTER CIRCUMFLEX ACCENT
+F763;Csmall;LATIN SMALL CAPITAL LETTER C
+0044;D;LATIN CAPITAL LETTER D
+010E;Dcaron;LATIN CAPITAL LETTER D WITH CARON
+0110;Dcroat;LATIN CAPITAL LETTER D WITH STROKE
+2206;Delta;INCREMENT
+0394;Delta;GREEK CAPITAL LETTER DELTA;Duplicate
+F6CB;Dieresis;CAPITAL DIAERESIS
+F6CC;DieresisAcute;CAPITAL DIAERESIS ACUTE ACCENT
+F6CD;DieresisGrave;CAPITAL DIAERESIS GRAVE ACCENT
+F7A8;Dieresissmall;SMALL CAPITAL DIAERESIS
+F6F7;Dotaccentsmall;SMALL CAPITAL DOT ABOVE
+F764;Dsmall;LATIN SMALL CAPITAL LETTER D
+0045;E;LATIN CAPITAL LETTER E
+00C9;Eacute;LATIN CAPITAL LETTER E WITH ACUTE
+F7E9;Eacutesmall;LATIN SMALL CAPITAL LETTER E WITH ACUTE
+0114;Ebreve;LATIN CAPITAL LETTER E WITH BREVE
+011A;Ecaron;LATIN CAPITAL LETTER E WITH CARON
+00CA;Ecircumflex;LATIN CAPITAL LETTER E WITH CIRCUMFLEX
+F7EA;Ecircumflexsmall;LATIN SMALL CAPITAL LETTER E WITH CIRCUMFLEX
+00CB;Edieresis;LATIN CAPITAL LETTER E WITH DIAERESIS
+F7EB;Edieresissmall;LATIN SMALL CAPITAL LETTER E WITH DIAERESIS
+0116;Edotaccent;LATIN CAPITAL LETTER E WITH DOT ABOVE
+00C8;Egrave;LATIN CAPITAL LETTER E WITH GRAVE
+F7E8;Egravesmall;LATIN SMALL CAPITAL LETTER E WITH GRAVE
+0112;Emacron;LATIN CAPITAL LETTER E WITH MACRON
+014A;Eng;LATIN CAPITAL LETTER ENG
+0118;Eogonek;LATIN CAPITAL LETTER E WITH OGONEK
+0395;Epsilon;GREEK CAPITAL LETTER EPSILON
+0388;Epsilontonos;GREEK CAPITAL LETTER EPSILON WITH TONOS
+F765;Esmall;LATIN SMALL CAPITAL LETTER E
+0397;Eta;GREEK CAPITAL LETTER ETA
+0389;Etatonos;GREEK CAPITAL LETTER ETA WITH TONOS
+00D0;Eth;LATIN CAPITAL LETTER ETH
+F7F0;Ethsmall;LATIN SMALL CAPITAL LETTER ETH
+20AC;Euro;EURO SIGN
+0046;F;LATIN CAPITAL LETTER F
+F766;Fsmall;LATIN SMALL CAPITAL LETTER F
+0047;G;LATIN CAPITAL LETTER G
+0393;Gamma;GREEK CAPITAL LETTER GAMMA
+011E;Gbreve;LATIN CAPITAL LETTER G WITH BREVE
+01E6;Gcaron;LATIN CAPITAL LETTER G WITH CARON
+011C;Gcircumflex;LATIN CAPITAL LETTER G WITH CIRCUMFLEX
+0122;Gcommaaccent;LATIN CAPITAL LETTER G WITH CEDILLA
+0120;Gdotaccent;LATIN CAPITAL LETTER G WITH DOT ABOVE
+F6CE;Grave;CAPITAL GRAVE ACCENT
+F760;Gravesmall;SMALL CAPITAL GRAVE ACCENT
+F767;Gsmall;LATIN SMALL CAPITAL LETTER G
+0048;H;LATIN CAPITAL LETTER H
+25CF;H18533;BLACK CIRCLE
+25AA;H18543;BLACK SMALL SQUARE
+25AB;H18551;WHITE SMALL SQUARE
+25A1;H22073;WHITE SQUARE
+0126;Hbar;LATIN CAPITAL LETTER H WITH STROKE
+0124;Hcircumflex;LATIN CAPITAL LETTER H WITH CIRCUMFLEX
+F768;Hsmall;LATIN SMALL CAPITAL LETTER H
+F6CF;Hungarumlaut;CAPITAL DOUBLE ACUTE ACCENT
+F6F8;Hungarumlautsmall;SMALL CAPITAL DOUBLE ACUTE ACCENT
+0049;I;LATIN CAPITAL LETTER I
+0132;IJ;LATIN CAPITAL LIGATURE IJ
+00CD;Iacute;LATIN CAPITAL LETTER I WITH ACUTE
+F7ED;Iacutesmall;LATIN SMALL CAPITAL LETTER I WITH ACUTE
+012C;Ibreve;LATIN CAPITAL LETTER I WITH BREVE
+00CE;Icircumflex;LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+F7EE;Icircumflexsmall;LATIN SMALL CAPITAL LETTER I WITH CIRCUMFLEX
+00CF;Idieresis;LATIN CAPITAL LETTER I WITH DIAERESIS
+F7EF;Idieresissmall;LATIN SMALL CAPITAL LETTER I WITH DIAERESIS
+0130;Idotaccent;LATIN CAPITAL LETTER I WITH DOT ABOVE
+2111;Ifraktur;BLACK-LETTER CAPITAL I
+00CC;Igrave;LATIN CAPITAL LETTER I WITH GRAVE
+F7EC;Igravesmall;LATIN SMALL CAPITAL LETTER I WITH GRAVE
+012A;Imacron;LATIN CAPITAL LETTER I WITH MACRON
+012E;Iogonek;LATIN CAPITAL LETTER I WITH OGONEK
+0399;Iota;GREEK CAPITAL LETTER IOTA
+03AA;Iotadieresis;GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
+038A;Iotatonos;GREEK CAPITAL LETTER IOTA WITH TONOS
+F769;Ismall;LATIN SMALL CAPITAL LETTER I
+0128;Itilde;LATIN CAPITAL LETTER I WITH TILDE
+004A;J;LATIN CAPITAL LETTER J
+0134;Jcircumflex;LATIN CAPITAL LETTER J WITH CIRCUMFLEX
+F76A;Jsmall;LATIN SMALL CAPITAL LETTER J
+004B;K;LATIN CAPITAL LETTER K
+039A;Kappa;GREEK CAPITAL LETTER KAPPA
+0136;Kcommaaccent;LATIN CAPITAL LETTER K WITH CEDILLA
+F76B;Ksmall;LATIN SMALL CAPITAL LETTER K
+004C;L;LATIN CAPITAL LETTER L
+F6BF;LL;LATIN CAPITAL LETTER LL
+0139;Lacute;LATIN CAPITAL LETTER L WITH ACUTE
+039B;Lambda;GREEK CAPITAL LETTER LAMDA
+013D;Lcaron;LATIN CAPITAL LETTER L WITH CARON
+013B;Lcommaaccent;LATIN CAPITAL LETTER L WITH CEDILLA
+013F;Ldot;LATIN CAPITAL LETTER L WITH MIDDLE DOT
+0141;Lslash;LATIN CAPITAL LETTER L WITH STROKE
+F6F9;Lslashsmall;LATIN SMALL CAPITAL LETTER L WITH STROKE
+F76C;Lsmall;LATIN SMALL CAPITAL LETTER L
+004D;M;LATIN CAPITAL LETTER M
+F6D0;Macron;CAPITAL MACRON
+F7AF;Macronsmall;SMALL CAPITAL MACRON
+F76D;Msmall;LATIN SMALL CAPITAL LETTER M
+039C;Mu;GREEK CAPITAL LETTER MU
+004E;N;LATIN CAPITAL LETTER N
+0143;Nacute;LATIN CAPITAL LETTER N WITH ACUTE
+0147;Ncaron;LATIN CAPITAL LETTER N WITH CARON
+0145;Ncommaaccent;LATIN CAPITAL LETTER N WITH CEDILLA
+F76E;Nsmall;LATIN SMALL CAPITAL LETTER N
+00D1;Ntilde;LATIN CAPITAL LETTER N WITH TILDE
+F7F1;Ntildesmall;LATIN SMALL CAPITAL LETTER N WITH TILDE
+039D;Nu;GREEK CAPITAL LETTER NU
+004F;O;LATIN CAPITAL LETTER O
+0152;OE;LATIN CAPITAL LIGATURE OE
+F6FA;OEsmall;LATIN SMALL CAPITAL LIGATURE OE
+00D3;Oacute;LATIN CAPITAL LETTER O WITH ACUTE
+F7F3;Oacutesmall;LATIN SMALL CAPITAL LETTER O WITH ACUTE
+014E;Obreve;LATIN CAPITAL LETTER O WITH BREVE
+00D4;Ocircumflex;LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+F7F4;Ocircumflexsmall;LATIN SMALL CAPITAL LETTER O WITH CIRCUMFLEX
+00D6;Odieresis;LATIN CAPITAL LETTER O WITH DIAERESIS
+F7F6;Odieresissmall;LATIN SMALL CAPITAL LETTER O WITH DIAERESIS
+F6FB;Ogoneksmall;SMALL CAPITAL OGONEK
+00D2;Ograve;LATIN CAPITAL LETTER O WITH GRAVE
+F7F2;Ogravesmall;LATIN SMALL CAPITAL LETTER O WITH GRAVE
+01A0;Ohorn;LATIN CAPITAL LETTER O WITH HORN
+0150;Ohungarumlaut;LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
+014C;Omacron;LATIN CAPITAL LETTER O WITH MACRON
+2126;Omega;OHM SIGN
+03A9;Omega;GREEK CAPITAL LETTER OMEGA;Duplicate
+038F;Omegatonos;GREEK CAPITAL LETTER OMEGA WITH TONOS
+039F;Omicron;GREEK CAPITAL LETTER OMICRON
+038C;Omicrontonos;GREEK CAPITAL LETTER OMICRON WITH TONOS
+00D8;Oslash;LATIN CAPITAL LETTER O WITH STROKE
+01FE;Oslashacute;LATIN CAPITAL LETTER O WITH STROKE AND ACUTE
+F7F8;Oslashsmall;LATIN SMALL CAPITAL LETTER O WITH STROKE
+F76F;Osmall;LATIN SMALL CAPITAL LETTER O
+00D5;Otilde;LATIN CAPITAL LETTER O WITH TILDE
+F7F5;Otildesmall;LATIN SMALL CAPITAL LETTER O WITH TILDE
+0050;P;LATIN CAPITAL LETTER P
+03A6;Phi;GREEK CAPITAL LETTER PHI
+03A0;Pi;GREEK CAPITAL LETTER PI
+03A8;Psi;GREEK CAPITAL LETTER PSI
+F770;Psmall;LATIN SMALL CAPITAL LETTER P
+0051;Q;LATIN CAPITAL LETTER Q
+F771;Qsmall;LATIN SMALL CAPITAL LETTER Q
+0052;R;LATIN CAPITAL LETTER R
+0154;Racute;LATIN CAPITAL LETTER R WITH ACUTE
+0158;Rcaron;LATIN CAPITAL LETTER R WITH CARON
+0156;Rcommaaccent;LATIN CAPITAL LETTER R WITH CEDILLA
+211C;Rfraktur;BLACK-LETTER CAPITAL R
+03A1;Rho;GREEK CAPITAL LETTER RHO
+F6FC;Ringsmall;SMALL CAPITAL RING ABOVE
+F772;Rsmall;LATIN SMALL CAPITAL LETTER R
+0053;S;LATIN CAPITAL LETTER S
+250C;SF010000;BOX DRAWINGS LIGHT DOWN AND RIGHT
+2514;SF020000;BOX DRAWINGS LIGHT UP AND RIGHT
+2510;SF030000;BOX DRAWINGS LIGHT DOWN AND LEFT
+2518;SF040000;BOX DRAWINGS LIGHT UP AND LEFT
+253C;SF050000;BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+252C;SF060000;BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+2534;SF070000;BOX DRAWINGS LIGHT UP AND HORIZONTAL
+251C;SF080000;BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+2524;SF090000;BOX DRAWINGS LIGHT VERTICAL AND LEFT
+2500;SF100000;BOX DRAWINGS LIGHT HORIZONTAL
+2502;SF110000;BOX DRAWINGS LIGHT VERTICAL
+2561;SF190000;BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
+2562;SF200000;BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
+2556;SF210000;BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
+2555;SF220000;BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
+2563;SF230000;BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+2551;SF240000;BOX DRAWINGS DOUBLE VERTICAL
+2557;SF250000;BOX DRAWINGS DOUBLE DOWN AND LEFT
+255D;SF260000;BOX DRAWINGS DOUBLE UP AND LEFT
+255C;SF270000;BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
+255B;SF280000;BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
+255E;SF360000;BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
+255F;SF370000;BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
+255A;SF380000;BOX DRAWINGS DOUBLE UP AND RIGHT
+2554;SF390000;BOX DRAWINGS DOUBLE DOWN AND RIGHT
+2569;SF400000;BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+2566;SF410000;BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+2560;SF420000;BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+2550;SF430000;BOX DRAWINGS DOUBLE HORIZONTAL
+256C;SF440000;BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+2567;SF450000;BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
+2568;SF460000;BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
+2564;SF470000;BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
+2565;SF480000;BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
+2559;SF490000;BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
+2558;SF500000;BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
+2552;SF510000;BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
+2553;SF520000;BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
+256B;SF530000;BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
+256A;SF540000;BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
+015A;Sacute;LATIN CAPITAL LETTER S WITH ACUTE
+0160;Scaron;LATIN CAPITAL LETTER S WITH CARON
+F6FD;Scaronsmall;LATIN SMALL CAPITAL LETTER S WITH CARON
+015E;Scedilla;LATIN CAPITAL LETTER S WITH CEDILLA
+F6C1;Scedilla;LATIN CAPITAL LETTER S WITH CEDILLA;Duplicate
+015C;Scircumflex;LATIN CAPITAL LETTER S WITH CIRCUMFLEX
+0218;Scommaaccent;LATIN CAPITAL LETTER S WITH COMMA BELOW
+03A3;Sigma;GREEK CAPITAL LETTER SIGMA
+F773;Ssmall;LATIN SMALL CAPITAL LETTER S
+0054;T;LATIN CAPITAL LETTER T
+03A4;Tau;GREEK CAPITAL LETTER TAU
+0166;Tbar;LATIN CAPITAL LETTER T WITH STROKE
+0164;Tcaron;LATIN CAPITAL LETTER T WITH CARON
+0162;Tcommaaccent;LATIN CAPITAL LETTER T WITH CEDILLA
+021A;Tcommaaccent;LATIN CAPITAL LETTER T WITH COMMA BELOW;Duplicate
+0398;Theta;GREEK CAPITAL LETTER THETA
+00DE;Thorn;LATIN CAPITAL LETTER THORN
+F7FE;Thornsmall;LATIN SMALL CAPITAL LETTER THORN
+F6FE;Tildesmall;SMALL CAPITAL SMALL TILDE
+F774;Tsmall;LATIN SMALL CAPITAL LETTER T
+0055;U;LATIN CAPITAL LETTER U
+00DA;Uacute;LATIN CAPITAL LETTER U WITH ACUTE
+F7FA;Uacutesmall;LATIN SMALL CAPITAL LETTER U WITH ACUTE
+016C;Ubreve;LATIN CAPITAL LETTER U WITH BREVE
+00DB;Ucircumflex;LATIN CAPITAL LETTER U WITH CIRCUMFLEX
+F7FB;Ucircumflexsmall;LATIN SMALL CAPITAL LETTER U WITH CIRCUMFLEX
+00DC;Udieresis;LATIN CAPITAL LETTER U WITH DIAERESIS
+F7FC;Udieresissmall;LATIN SMALL CAPITAL LETTER U WITH DIAERESIS
+00D9;Ugrave;LATIN CAPITAL LETTER U WITH GRAVE
+F7F9;Ugravesmall;LATIN SMALL CAPITAL LETTER U WITH GRAVE
+01AF;Uhorn;LATIN CAPITAL LETTER U WITH HORN
+0170;Uhungarumlaut;LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
+016A;Umacron;LATIN CAPITAL LETTER U WITH MACRON
+0172;Uogonek;LATIN CAPITAL LETTER U WITH OGONEK
+03A5;Upsilon;GREEK CAPITAL LETTER UPSILON
+03D2;Upsilon1;GREEK UPSILON WITH HOOK SYMBOL
+03AB;Upsilondieresis;GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
+038E;Upsilontonos;GREEK CAPITAL LETTER UPSILON WITH TONOS
+016E;Uring;LATIN CAPITAL LETTER U WITH RING ABOVE
+F775;Usmall;LATIN SMALL CAPITAL LETTER U
+0168;Utilde;LATIN CAPITAL LETTER U WITH TILDE
+0056;V;LATIN CAPITAL LETTER V
+F776;Vsmall;LATIN SMALL CAPITAL LETTER V
+0057;W;LATIN CAPITAL LETTER W
+1E82;Wacute;LATIN CAPITAL LETTER W WITH ACUTE
+0174;Wcircumflex;LATIN CAPITAL LETTER W WITH CIRCUMFLEX
+1E84;Wdieresis;LATIN CAPITAL LETTER W WITH DIAERESIS
+1E80;Wgrave;LATIN CAPITAL LETTER W WITH GRAVE
+F777;Wsmall;LATIN SMALL CAPITAL LETTER W
+0058;X;LATIN CAPITAL LETTER X
+039E;Xi;GREEK CAPITAL LETTER XI
+F778;Xsmall;LATIN SMALL CAPITAL LETTER X
+0059;Y;LATIN CAPITAL LETTER Y
+00DD;Yacute;LATIN CAPITAL LETTER Y WITH ACUTE
+F7FD;Yacutesmall;LATIN SMALL CAPITAL LETTER Y WITH ACUTE
+0176;Ycircumflex;LATIN CAPITAL LETTER Y WITH CIRCUMFLEX
+0178;Ydieresis;LATIN CAPITAL LETTER Y WITH DIAERESIS
+F7FF;Ydieresissmall;LATIN SMALL CAPITAL LETTER Y WITH DIAERESIS
+1EF2;Ygrave;LATIN CAPITAL LETTER Y WITH GRAVE
+F779;Ysmall;LATIN SMALL CAPITAL LETTER Y
+005A;Z;LATIN CAPITAL LETTER Z
+0179;Zacute;LATIN CAPITAL LETTER Z WITH ACUTE
+017D;Zcaron;LATIN CAPITAL LETTER Z WITH CARON
+F6FF;Zcaronsmall;LATIN SMALL CAPITAL LETTER Z WITH CARON
+017B;Zdotaccent;LATIN CAPITAL LETTER Z WITH DOT ABOVE
+0396;Zeta;GREEK CAPITAL LETTER ZETA
+F77A;Zsmall;LATIN SMALL CAPITAL LETTER Z
+0061;a;LATIN SMALL LETTER A
+00E1;aacute;LATIN SMALL LETTER A WITH ACUTE
+0103;abreve;LATIN SMALL LETTER A WITH BREVE
+00E2;acircumflex;LATIN SMALL LETTER A WITH CIRCUMFLEX
+00B4;acute;ACUTE ACCENT
+0301;acutecomb;COMBINING ACUTE ACCENT
+00E4;adieresis;LATIN SMALL LETTER A WITH DIAERESIS
+00E6;ae;LATIN SMALL LETTER AE
+01FD;aeacute;LATIN SMALL LETTER AE WITH ACUTE
+2015;afii00208;HORIZONTAL BAR
+0410;afii10017;CYRILLIC CAPITAL LETTER A
+0411;afii10018;CYRILLIC CAPITAL LETTER BE
+0412;afii10019;CYRILLIC CAPITAL LETTER VE
+0413;afii10020;CYRILLIC CAPITAL LETTER GHE
+0414;afii10021;CYRILLIC CAPITAL LETTER DE
+0415;afii10022;CYRILLIC CAPITAL LETTER IE
+0401;afii10023;CYRILLIC CAPITAL LETTER IO
+0416;afii10024;CYRILLIC CAPITAL LETTER ZHE
+0417;afii10025;CYRILLIC CAPITAL LETTER ZE
+0418;afii10026;CYRILLIC CAPITAL LETTER I
+0419;afii10027;CYRILLIC CAPITAL LETTER SHORT I
+041A;afii10028;CYRILLIC CAPITAL LETTER KA
+041B;afii10029;CYRILLIC CAPITAL LETTER EL
+041C;afii10030;CYRILLIC CAPITAL LETTER EM
+041D;afii10031;CYRILLIC CAPITAL LETTER EN
+041E;afii10032;CYRILLIC CAPITAL LETTER O
+041F;afii10033;CYRILLIC CAPITAL LETTER PE
+0420;afii10034;CYRILLIC CAPITAL LETTER ER
+0421;afii10035;CYRILLIC CAPITAL LETTER ES
+0422;afii10036;CYRILLIC CAPITAL LETTER TE
+0423;afii10037;CYRILLIC CAPITAL LETTER U
+0424;afii10038;CYRILLIC CAPITAL LETTER EF
+0425;afii10039;CYRILLIC CAPITAL LETTER HA
+0426;afii10040;CYRILLIC CAPITAL LETTER TSE
+0427;afii10041;CYRILLIC CAPITAL LETTER CHE
+0428;afii10042;CYRILLIC CAPITAL LETTER SHA
+0429;afii10043;CYRILLIC CAPITAL LETTER SHCHA
+042A;afii10044;CYRILLIC CAPITAL LETTER HARD SIGN
+042B;afii10045;CYRILLIC CAPITAL LETTER YERU
+042C;afii10046;CYRILLIC CAPITAL LETTER SOFT SIGN
+042D;afii10047;CYRILLIC CAPITAL LETTER E
+042E;afii10048;CYRILLIC CAPITAL LETTER YU
+042F;afii10049;CYRILLIC CAPITAL LETTER YA
+0490;afii10050;CYRILLIC CAPITAL LETTER GHE WITH UPTURN
+0402;afii10051;CYRILLIC CAPITAL LETTER DJE
+0403;afii10052;CYRILLIC CAPITAL LETTER GJE
+0404;afii10053;CYRILLIC CAPITAL LETTER UKRAINIAN IE
+0405;afii10054;CYRILLIC CAPITAL LETTER DZE
+0406;afii10055;CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
+0407;afii10056;CYRILLIC CAPITAL LETTER YI
+0408;afii10057;CYRILLIC CAPITAL LETTER JE
+0409;afii10058;CYRILLIC CAPITAL LETTER LJE
+040A;afii10059;CYRILLIC CAPITAL LETTER NJE
+040B;afii10060;CYRILLIC CAPITAL LETTER TSHE
+040C;afii10061;CYRILLIC CAPITAL LETTER KJE
+040E;afii10062;CYRILLIC CAPITAL LETTER SHORT U
+F6C4;afii10063;CYRILLIC SMALL LETTER GHE VARIANT
+F6C5;afii10064;CYRILLIC SMALL LETTER BE VARIANT
+0430;afii10065;CYRILLIC SMALL LETTER A
+0431;afii10066;CYRILLIC SMALL LETTER BE
+0432;afii10067;CYRILLIC SMALL LETTER VE
+0433;afii10068;CYRILLIC SMALL LETTER GHE
+0434;afii10069;CYRILLIC SMALL LETTER DE
+0435;afii10070;CYRILLIC SMALL LETTER IE
+0451;afii10071;CYRILLIC SMALL LETTER IO
+0436;afii10072;CYRILLIC SMALL LETTER ZHE
+0437;afii10073;CYRILLIC SMALL LETTER ZE
+0438;afii10074;CYRILLIC SMALL LETTER I
+0439;afii10075;CYRILLIC SMALL LETTER SHORT I
+043A;afii10076;CYRILLIC SMALL LETTER KA
+043B;afii10077;CYRILLIC SMALL LETTER EL
+043C;afii10078;CYRILLIC SMALL LETTER EM
+043D;afii10079;CYRILLIC SMALL LETTER EN
+043E;afii10080;CYRILLIC SMALL LETTER O
+043F;afii10081;CYRILLIC SMALL LETTER PE
+0440;afii10082;CYRILLIC SMALL LETTER ER
+0441;afii10083;CYRILLIC SMALL LETTER ES
+0442;afii10084;CYRILLIC SMALL LETTER TE
+0443;afii10085;CYRILLIC SMALL LETTER U
+0444;afii10086;CYRILLIC SMALL LETTER EF
+0445;afii10087;CYRILLIC SMALL LETTER HA
+0446;afii10088;CYRILLIC SMALL LETTER TSE
+0447;afii10089;CYRILLIC SMALL LETTER CHE
+0448;afii10090;CYRILLIC SMALL LETTER SHA
+0449;afii10091;CYRILLIC SMALL LETTER SHCHA
+044A;afii10092;CYRILLIC SMALL LETTER HARD SIGN
+044B;afii10093;CYRILLIC SMALL LETTER YERU
+044C;afii10094;CYRILLIC SMALL LETTER SOFT SIGN
+044D;afii10095;CYRILLIC SMALL LETTER E
+044E;afii10096;CYRILLIC SMALL LETTER YU
+044F;afii10097;CYRILLIC SMALL LETTER YA
+0491;afii10098;CYRILLIC SMALL LETTER GHE WITH UPTURN
+0452;afii10099;CYRILLIC SMALL LETTER DJE
+0453;afii10100;CYRILLIC SMALL LETTER GJE
+0454;afii10101;CYRILLIC SMALL LETTER UKRAINIAN IE
+0455;afii10102;CYRILLIC SMALL LETTER DZE
+0456;afii10103;CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
+0457;afii10104;CYRILLIC SMALL LETTER YI
+0458;afii10105;CYRILLIC SMALL LETTER JE
+0459;afii10106;CYRILLIC SMALL LETTER LJE
+045A;afii10107;CYRILLIC SMALL LETTER NJE
+045B;afii10108;CYRILLIC SMALL LETTER TSHE
+045C;afii10109;CYRILLIC SMALL LETTER KJE
+045E;afii10110;CYRILLIC SMALL LETTER SHORT U
+040F;afii10145;CYRILLIC CAPITAL LETTER DZHE
+0462;afii10146;CYRILLIC CAPITAL LETTER YAT
+0472;afii10147;CYRILLIC CAPITAL LETTER FITA
+0474;afii10148;CYRILLIC CAPITAL LETTER IZHITSA
+F6C6;afii10192;CYRILLIC SMALL LETTER DE VARIANT
+045F;afii10193;CYRILLIC SMALL LETTER DZHE
+0463;afii10194;CYRILLIC SMALL LETTER YAT
+0473;afii10195;CYRILLIC SMALL LETTER FITA
+0475;afii10196;CYRILLIC SMALL LETTER IZHITSA
+F6C7;afii10831;CYRILLIC SMALL LETTER PE VARIANT
+F6C8;afii10832;CYRILLIC SMALL LETTER TE VARIANT
+04D9;afii10846;CYRILLIC SMALL LETTER SCHWA
+200E;afii299;LEFT-TO-RIGHT MARK
+200F;afii300;RIGHT-TO-LEFT MARK
+200D;afii301;ZERO WIDTH JOINER
+066A;afii57381;ARABIC PERCENT SIGN
+060C;afii57388;ARABIC COMMA
+0660;afii57392;ARABIC-INDIC DIGIT ZERO
+0661;afii57393;ARABIC-INDIC DIGIT ONE
+0662;afii57394;ARABIC-INDIC DIGIT TWO
+0663;afii57395;ARABIC-INDIC DIGIT THREE
+0664;afii57396;ARABIC-INDIC DIGIT FOUR
+0665;afii57397;ARABIC-INDIC DIGIT FIVE
+0666;afii57398;ARABIC-INDIC DIGIT SIX
+0667;afii57399;ARABIC-INDIC DIGIT SEVEN
+0668;afii57400;ARABIC-INDIC DIGIT EIGHT
+0669;afii57401;ARABIC-INDIC DIGIT NINE
+061B;afii57403;ARABIC SEMICOLON
+061F;afii57407;ARABIC QUESTION MARK
+0621;afii57409;ARABIC LETTER HAMZA
+0622;afii57410;ARABIC LETTER ALEF WITH MADDA ABOVE
+0623;afii57411;ARABIC LETTER ALEF WITH HAMZA ABOVE
+0624;afii57412;ARABIC LETTER WAW WITH HAMZA ABOVE
+0625;afii57413;ARABIC LETTER ALEF WITH HAMZA BELOW
+0626;afii57414;ARABIC LETTER YEH WITH HAMZA ABOVE
+0627;afii57415;ARABIC LETTER ALEF
+0628;afii57416;ARABIC LETTER BEH
+0629;afii57417;ARABIC LETTER TEH MARBUTA
+062A;afii57418;ARABIC LETTER TEH
+062B;afii57419;ARABIC LETTER THEH
+062C;afii57420;ARABIC LETTER JEEM
+062D;afii57421;ARABIC LETTER HAH
+062E;afii57422;ARABIC LETTER KHAH
+062F;afii57423;ARABIC LETTER DAL
+0630;afii57424;ARABIC LETTER THAL
+0631;afii57425;ARABIC LETTER REH
+0632;afii57426;ARABIC LETTER ZAIN
+0633;afii57427;ARABIC LETTER SEEN
+0634;afii57428;ARABIC LETTER SHEEN
+0635;afii57429;ARABIC LETTER SAD
+0636;afii57430;ARABIC LETTER DAD
+0637;afii57431;ARABIC LETTER TAH
+0638;afii57432;ARABIC LETTER ZAH
+0639;afii57433;ARABIC LETTER AIN
+063A;afii57434;ARABIC LETTER GHAIN
+0640;afii57440;ARABIC TATWEEL
+0641;afii57441;ARABIC LETTER FEH
+0642;afii57442;ARABIC LETTER QAF
+0643;afii57443;ARABIC LETTER KAF
+0644;afii57444;ARABIC LETTER LAM
+0645;afii57445;ARABIC LETTER MEEM
+0646;afii57446;ARABIC LETTER NOON
+0648;afii57448;ARABIC LETTER WAW
+0649;afii57449;ARABIC LETTER ALEF MAKSURA
+064A;afii57450;ARABIC LETTER YEH
+064B;afii57451;ARABIC FATHATAN
+064C;afii57452;ARABIC DAMMATAN
+064D;afii57453;ARABIC KASRATAN
+064E;afii57454;ARABIC FATHA
+064F;afii57455;ARABIC DAMMA
+0650;afii57456;ARABIC KASRA
+0651;afii57457;ARABIC SHADDA
+0652;afii57458;ARABIC SUKUN
+0647;afii57470;ARABIC LETTER HEH
+06A4;afii57505;ARABIC LETTER VEH
+067E;afii57506;ARABIC LETTER PEH
+0686;afii57507;ARABIC LETTER TCHEH
+0698;afii57508;ARABIC LETTER JEH
+06AF;afii57509;ARABIC LETTER GAF
+0679;afii57511;ARABIC LETTER TTEH
+0688;afii57512;ARABIC LETTER DDAL
+0691;afii57513;ARABIC LETTER RREH
+06BA;afii57514;ARABIC LETTER NOON GHUNNA
+06D2;afii57519;ARABIC LETTER YEH BARREE
+06D5;afii57534;ARABIC LETTER AE
+20AA;afii57636;NEW SHEQEL SIGN
+05BE;afii57645;HEBREW PUNCTUATION MAQAF
+05C3;afii57658;HEBREW PUNCTUATION SOF PASUQ
+05D0;afii57664;HEBREW LETTER ALEF
+05D1;afii57665;HEBREW LETTER BET
+05D2;afii57666;HEBREW LETTER GIMEL
+05D3;afii57667;HEBREW LETTER DALET
+05D4;afii57668;HEBREW LETTER HE
+05D5;afii57669;HEBREW LETTER VAV
+05D6;afii57670;HEBREW LETTER ZAYIN
+05D7;afii57671;HEBREW LETTER HET
+05D8;afii57672;HEBREW LETTER TET
+05D9;afii57673;HEBREW LETTER YOD
+05DA;afii57674;HEBREW LETTER FINAL KAF
+05DB;afii57675;HEBREW LETTER KAF
+05DC;afii57676;HEBREW LETTER LAMED
+05DD;afii57677;HEBREW LETTER FINAL MEM
+05DE;afii57678;HEBREW LETTER MEM
+05DF;afii57679;HEBREW LETTER FINAL NUN
+05E0;afii57680;HEBREW LETTER NUN
+05E1;afii57681;HEBREW LETTER SAMEKH
+05E2;afii57682;HEBREW LETTER AYIN
+05E3;afii57683;HEBREW LETTER FINAL PE
+05E4;afii57684;HEBREW LETTER PE
+05E5;afii57685;HEBREW LETTER FINAL TSADI
+05E6;afii57686;HEBREW LETTER TSADI
+05E7;afii57687;HEBREW LETTER QOF
+05E8;afii57688;HEBREW LETTER RESH
+05E9;afii57689;HEBREW LETTER SHIN
+05EA;afii57690;HEBREW LETTER TAV
+FB2A;afii57694;HEBREW LETTER SHIN WITH SHIN DOT
+FB2B;afii57695;HEBREW LETTER SHIN WITH SIN DOT
+FB4B;afii57700;HEBREW LETTER VAV WITH HOLAM
+FB1F;afii57705;HEBREW LIGATURE YIDDISH YOD YOD PATAH
+05F0;afii57716;HEBREW LIGATURE YIDDISH DOUBLE VAV
+05F1;afii57717;HEBREW LIGATURE YIDDISH VAV YOD
+05F2;afii57718;HEBREW LIGATURE YIDDISH DOUBLE YOD
+FB35;afii57723;HEBREW LETTER VAV WITH DAGESH
+05B4;afii57793;HEBREW POINT HIRIQ
+05B5;afii57794;HEBREW POINT TSERE
+05B6;afii57795;HEBREW POINT SEGOL
+05BB;afii57796;HEBREW POINT QUBUTS
+05B8;afii57797;HEBREW POINT QAMATS
+05B7;afii57798;HEBREW POINT PATAH
+05B0;afii57799;HEBREW POINT SHEVA
+05B2;afii57800;HEBREW POINT HATAF PATAH
+05B1;afii57801;HEBREW POINT HATAF SEGOL
+05B3;afii57802;HEBREW POINT HATAF QAMATS
+05C2;afii57803;HEBREW POINT SIN DOT
+05C1;afii57804;HEBREW POINT SHIN DOT
+05B9;afii57806;HEBREW POINT HOLAM
+05BC;afii57807;HEBREW POINT DAGESH OR MAPIQ
+05BD;afii57839;HEBREW POINT METEG
+05BF;afii57841;HEBREW POINT RAFE
+05C0;afii57842;HEBREW PUNCTUATION PASEQ
+02BC;afii57929;MODIFIER LETTER APOSTROPHE
+2105;afii61248;CARE OF
+2113;afii61289;SCRIPT SMALL L
+2116;afii61352;NUMERO SIGN
+202C;afii61573;POP DIRECTIONAL FORMATTING
+202D;afii61574;LEFT-TO-RIGHT OVERRIDE
+202E;afii61575;RIGHT-TO-LEFT OVERRIDE
+200C;afii61664;ZERO WIDTH NON-JOINER
+066D;afii63167;ARABIC FIVE POINTED STAR
+02BD;afii64937;MODIFIER LETTER REVERSED COMMA
+00E0;agrave;LATIN SMALL LETTER A WITH GRAVE
+2135;aleph;ALEF SYMBOL
+03B1;alpha;GREEK SMALL LETTER ALPHA
+03AC;alphatonos;GREEK SMALL LETTER ALPHA WITH TONOS
+0101;amacron;LATIN SMALL LETTER A WITH MACRON
+0026;ampersand;AMPERSAND
+F726;ampersandsmall;SMALL CAPITAL AMPERSAND
+2220;angle;ANGLE
+2329;angleleft;LEFT-POINTING ANGLE BRACKET
+232A;angleright;RIGHT-POINTING ANGLE BRACKET
+0387;anoteleia;GREEK ANO TELEIA
+0105;aogonek;LATIN SMALL LETTER A WITH OGONEK
+2248;approxequal;ALMOST EQUAL TO
+00E5;aring;LATIN SMALL LETTER A WITH RING ABOVE
+01FB;aringacute;LATIN SMALL LETTER A WITH RING ABOVE AND ACUTE
+2194;arrowboth;LEFT RIGHT ARROW
+21D4;arrowdblboth;LEFT RIGHT DOUBLE ARROW
+21D3;arrowdbldown;DOWNWARDS DOUBLE ARROW
+21D0;arrowdblleft;LEFTWARDS DOUBLE ARROW
+21D2;arrowdblright;RIGHTWARDS DOUBLE ARROW
+21D1;arrowdblup;UPWARDS DOUBLE ARROW
+2193;arrowdown;DOWNWARDS ARROW
+F8E7;arrowhorizex;HORIZONTAL ARROW EXTENDER
+2190;arrowleft;LEFTWARDS ARROW
+2192;arrowright;RIGHTWARDS ARROW
+2191;arrowup;UPWARDS ARROW
+2195;arrowupdn;UP DOWN ARROW
+21A8;arrowupdnbse;UP DOWN ARROW WITH BASE
+F8E6;arrowvertex;VERTICAL ARROW EXTENDER
+005E;asciicircum;CIRCUMFLEX ACCENT
+007E;asciitilde;TILDE
+002A;asterisk;ASTERISK
+2217;asteriskmath;ASTERISK OPERATOR
+F6E9;asuperior;SUPERSCRIPT LATIN SMALL LETTER A
+0040;at;COMMERCIAL AT
+00E3;atilde;LATIN SMALL LETTER A WITH TILDE
+0062;b;LATIN SMALL LETTER B
+005C;backslash;REVERSE SOLIDUS
+007C;bar;VERTICAL LINE
+03B2;beta;GREEK SMALL LETTER BETA
+2588;block;FULL BLOCK
+F8F4;braceex;CURLY BRACKET EXTENDER
+007B;braceleft;LEFT CURLY BRACKET
+F8F3;braceleftbt;LEFT CURLY BRACKET BOTTOM
+F8F2;braceleftmid;LEFT CURLY BRACKET MID
+F8F1;bracelefttp;LEFT CURLY BRACKET TOP
+007D;braceright;RIGHT CURLY BRACKET
+F8FE;bracerightbt;RIGHT CURLY BRACKET BOTTOM
+F8FD;bracerightmid;RIGHT CURLY BRACKET MID
+F8FC;bracerighttp;RIGHT CURLY BRACKET TOP
+005B;bracketleft;LEFT SQUARE BRACKET
+F8F0;bracketleftbt;LEFT SQUARE BRACKET BOTTOM
+F8EF;bracketleftex;LEFT SQUARE BRACKET EXTENDER
+F8EE;bracketlefttp;LEFT SQUARE BRACKET TOP
+005D;bracketright;RIGHT SQUARE BRACKET
+F8FB;bracketrightbt;RIGHT SQUARE BRACKET BOTTOM
+F8FA;bracketrightex;RIGHT SQUARE BRACKET EXTENDER
+F8F9;bracketrighttp;RIGHT SQUARE BRACKET TOP
+02D8;breve;BREVE
+00A6;brokenbar;BROKEN BAR
+F6EA;bsuperior;SUPERSCRIPT LATIN SMALL LETTER B
+2022;bullet;BULLET
+0063;c;LATIN SMALL LETTER C
+0107;cacute;LATIN SMALL LETTER C WITH ACUTE
+02C7;caron;CARON
+21B5;carriagereturn;DOWNWARDS ARROW WITH CORNER LEFTWARDS
+010D;ccaron;LATIN SMALL LETTER C WITH CARON
+00E7;ccedilla;LATIN SMALL LETTER C WITH CEDILLA
+0109;ccircumflex;LATIN SMALL LETTER C WITH CIRCUMFLEX
+010B;cdotaccent;LATIN SMALL LETTER C WITH DOT ABOVE
+00B8;cedilla;CEDILLA
+00A2;cent;CENT SIGN
+F6DF;centinferior;SUBSCRIPT CENT SIGN
+F7A2;centoldstyle;OLDSTYLE CENT SIGN
+F6E0;centsuperior;SUPERSCRIPT CENT SIGN
+03C7;chi;GREEK SMALL LETTER CHI
+25CB;circle;WHITE CIRCLE
+2297;circlemultiply;CIRCLED TIMES
+2295;circleplus;CIRCLED PLUS
+02C6;circumflex;MODIFIER LETTER CIRCUMFLEX ACCENT
+2663;club;BLACK CLUB SUIT
+003A;colon;COLON
+20A1;colonmonetary;COLON SIGN
+002C;comma;COMMA
+F6C3;commaaccent;COMMA BELOW
+F6E1;commainferior;SUBSCRIPT COMMA
+F6E2;commasuperior;SUPERSCRIPT COMMA
+2245;congruent;APPROXIMATELY EQUAL TO
+00A9;copyright;COPYRIGHT SIGN
+F8E9;copyrightsans;COPYRIGHT SIGN SANS SERIF
+F6D9;copyrightserif;COPYRIGHT SIGN SERIF
+00A4;currency;CURRENCY SIGN
+F6D1;cyrBreve;CAPITAL CYRILLIC BREVE
+F6D2;cyrFlex;CAPITAL CYRILLIC CIRCUMFLEX
+F6D4;cyrbreve;CYRILLIC BREVE
+F6D5;cyrflex;CYRILLIC CIRCUMFLEX
+0064;d;LATIN SMALL LETTER D
+2020;dagger;DAGGER
+2021;daggerdbl;DOUBLE DAGGER
+F6D3;dblGrave;CAPITAL DOUBLE GRAVE ACCENT
+F6D6;dblgrave;DOUBLE GRAVE ACCENT
+010F;dcaron;LATIN SMALL LETTER D WITH CARON
+0111;dcroat;LATIN SMALL LETTER D WITH STROKE
+00B0;degree;DEGREE SIGN
+03B4;delta;GREEK SMALL LETTER DELTA
+2666;diamond;BLACK DIAMOND SUIT
+00A8;dieresis;DIAERESIS
+F6D7;dieresisacute;DIAERESIS ACUTE ACCENT
+F6D8;dieresisgrave;DIAERESIS GRAVE ACCENT
+0385;dieresistonos;GREEK DIALYTIKA TONOS
+00F7;divide;DIVISION SIGN
+2593;dkshade;DARK SHADE
+2584;dnblock;LOWER HALF BLOCK
+0024;dollar;DOLLAR SIGN
+F6E3;dollarinferior;SUBSCRIPT DOLLAR SIGN
+F724;dollaroldstyle;OLDSTYLE DOLLAR SIGN
+F6E4;dollarsuperior;SUPERSCRIPT DOLLAR SIGN
+20AB;dong;DONG SIGN
+02D9;dotaccent;DOT ABOVE
+0323;dotbelowcomb;COMBINING DOT BELOW
+0131;dotlessi;LATIN SMALL LETTER DOTLESS I
+F6BE;dotlessj;LATIN SMALL LETTER DOTLESS J
+22C5;dotmath;DOT OPERATOR
+F6EB;dsuperior;SUPERSCRIPT LATIN SMALL LETTER D
+0065;e;LATIN SMALL LETTER E
+00E9;eacute;LATIN SMALL LETTER E WITH ACUTE
+0115;ebreve;LATIN SMALL LETTER E WITH BREVE
+011B;ecaron;LATIN SMALL LETTER E WITH CARON
+00EA;ecircumflex;LATIN SMALL LETTER E WITH CIRCUMFLEX
+00EB;edieresis;LATIN SMALL LETTER E WITH DIAERESIS
+0117;edotaccent;LATIN SMALL LETTER E WITH DOT ABOVE
+00E8;egrave;LATIN SMALL LETTER E WITH GRAVE
+0038;eight;DIGIT EIGHT
+2088;eightinferior;SUBSCRIPT EIGHT
+F738;eightoldstyle;OLDSTYLE DIGIT EIGHT
+2078;eightsuperior;SUPERSCRIPT EIGHT
+2208;element;ELEMENT OF
+2026;ellipsis;HORIZONTAL ELLIPSIS
+0113;emacron;LATIN SMALL LETTER E WITH MACRON
+2014;emdash;EM DASH
+2205;emptyset;EMPTY SET
+2013;endash;EN DASH
+014B;eng;LATIN SMALL LETTER ENG
+0119;eogonek;LATIN SMALL LETTER E WITH OGONEK
+03B5;epsilon;GREEK SMALL LETTER EPSILON
+03AD;epsilontonos;GREEK SMALL LETTER EPSILON WITH TONOS
+003D;equal;EQUALS SIGN
+2261;equivalence;IDENTICAL TO
+212E;estimated;ESTIMATED SYMBOL
+F6EC;esuperior;SUPERSCRIPT LATIN SMALL LETTER E
+03B7;eta;GREEK SMALL LETTER ETA
+03AE;etatonos;GREEK SMALL LETTER ETA WITH TONOS
+00F0;eth;LATIN SMALL LETTER ETH
+0021;exclam;EXCLAMATION MARK
+203C;exclamdbl;DOUBLE EXCLAMATION MARK
+00A1;exclamdown;INVERTED EXCLAMATION MARK
+F7A1;exclamdownsmall;SMALL CAPITAL INVERTED EXCLAMATION MARK
+F721;exclamsmall;SMALL CAPITAL EXCLAMATION MARK
+2203;existential;THERE EXISTS
+0066;f;LATIN SMALL LETTER F
+2640;female;FEMALE SIGN
+FB00;ff;LATIN SMALL LIGATURE FF
+FB03;ffi;LATIN SMALL LIGATURE FFI
+FB04;ffl;LATIN SMALL LIGATURE FFL
+FB01;fi;LATIN SMALL LIGATURE FI
+2012;figuredash;FIGURE DASH
+25A0;filledbox;BLACK SQUARE
+25AC;filledrect;BLACK RECTANGLE
+0035;five;DIGIT FIVE
+215D;fiveeighths;VULGAR FRACTION FIVE EIGHTHS
+2085;fiveinferior;SUBSCRIPT FIVE
+F735;fiveoldstyle;OLDSTYLE DIGIT FIVE
+2075;fivesuperior;SUPERSCRIPT FIVE
+FB02;fl;LATIN SMALL LIGATURE FL
+0192;florin;LATIN SMALL LETTER F WITH HOOK
+0034;four;DIGIT FOUR
+2084;fourinferior;SUBSCRIPT FOUR
+F734;fouroldstyle;OLDSTYLE DIGIT FOUR
+2074;foursuperior;SUPERSCRIPT FOUR
+2044;fraction;FRACTION SLASH
+2215;fraction;DIVISION SLASH;Duplicate
+20A3;franc;FRENCH FRANC SIGN
+0067;g;LATIN SMALL LETTER G
+03B3;gamma;GREEK SMALL LETTER GAMMA
+011F;gbreve;LATIN SMALL LETTER G WITH BREVE
+01E7;gcaron;LATIN SMALL LETTER G WITH CARON
+011D;gcircumflex;LATIN SMALL LETTER G WITH CIRCUMFLEX
+0123;gcommaaccent;LATIN SMALL LETTER G WITH CEDILLA
+0121;gdotaccent;LATIN SMALL LETTER G WITH DOT ABOVE
+00DF;germandbls;LATIN SMALL LETTER SHARP S
+2207;gradient;NABLA
+0060;grave;GRAVE ACCENT
+0300;gravecomb;COMBINING GRAVE ACCENT
+003E;greater;GREATER-THAN SIGN
+2265;greaterequal;GREATER-THAN OR EQUAL TO
+00AB;guillemotleft;LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+00BB;guillemotright;RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+2039;guilsinglleft;SINGLE LEFT-POINTING ANGLE QUOTATION MARK
+203A;guilsinglright;SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
+0068;h;LATIN SMALL LETTER H
+0127;hbar;LATIN SMALL LETTER H WITH STROKE
+0125;hcircumflex;LATIN SMALL LETTER H WITH CIRCUMFLEX
+2665;heart;BLACK HEART SUIT
+0309;hookabovecomb;COMBINING HOOK ABOVE
+2302;house;HOUSE
+02DD;hungarumlaut;DOUBLE ACUTE ACCENT
+002D;hyphen;HYPHEN-MINUS
+00AD;hyphen;SOFT HYPHEN;Duplicate
+F6E5;hypheninferior;SUBSCRIPT HYPHEN-MINUS
+F6E6;hyphensuperior;SUPERSCRIPT HYPHEN-MINUS
+0069;i;LATIN SMALL LETTER I
+00ED;iacute;LATIN SMALL LETTER I WITH ACUTE
+012D;ibreve;LATIN SMALL LETTER I WITH BREVE
+00EE;icircumflex;LATIN SMALL LETTER I WITH CIRCUMFLEX
+00EF;idieresis;LATIN SMALL LETTER I WITH DIAERESIS
+00EC;igrave;LATIN SMALL LETTER I WITH GRAVE
+0133;ij;LATIN SMALL LIGATURE IJ
+012B;imacron;LATIN SMALL LETTER I WITH MACRON
+221E;infinity;INFINITY
+222B;integral;INTEGRAL
+2321;integralbt;BOTTOM HALF INTEGRAL
+F8F5;integralex;INTEGRAL EXTENDER
+2320;integraltp;TOP HALF INTEGRAL
+2229;intersection;INTERSECTION
+25D8;invbullet;INVERSE BULLET
+25D9;invcircle;INVERSE WHITE CIRCLE
+263B;invsmileface;BLACK SMILING FACE
+012F;iogonek;LATIN SMALL LETTER I WITH OGONEK
+03B9;iota;GREEK SMALL LETTER IOTA
+03CA;iotadieresis;GREEK SMALL LETTER IOTA WITH DIALYTIKA
+0390;iotadieresistonos;GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
+03AF;iotatonos;GREEK SMALL LETTER IOTA WITH TONOS
+F6ED;isuperior;SUPERSCRIPT LATIN SMALL LETTER I
+0129;itilde;LATIN SMALL LETTER I WITH TILDE
+006A;j;LATIN SMALL LETTER J
+0135;jcircumflex;LATIN SMALL LETTER J WITH CIRCUMFLEX
+006B;k;LATIN SMALL LETTER K
+03BA;kappa;GREEK SMALL LETTER KAPPA
+0137;kcommaaccent;LATIN SMALL LETTER K WITH CEDILLA
+0138;kgreenlandic;LATIN SMALL LETTER KRA
+006C;l;LATIN SMALL LETTER L
+013A;lacute;LATIN SMALL LETTER L WITH ACUTE
+03BB;lambda;GREEK SMALL LETTER LAMDA
+013E;lcaron;LATIN SMALL LETTER L WITH CARON
+013C;lcommaaccent;LATIN SMALL LETTER L WITH CEDILLA
+0140;ldot;LATIN SMALL LETTER L WITH MIDDLE DOT
+003C;less;LESS-THAN SIGN
+2264;lessequal;LESS-THAN OR EQUAL TO
+258C;lfblock;LEFT HALF BLOCK
+20A4;lira;LIRA SIGN
+F6C0;ll;LATIN SMALL LETTER LL
+2227;logicaland;LOGICAL AND
+00AC;logicalnot;NOT SIGN
+2228;logicalor;LOGICAL OR
+017F;longs;LATIN SMALL LETTER LONG S
+25CA;lozenge;LOZENGE
+0142;lslash;LATIN SMALL LETTER L WITH STROKE
+F6EE;lsuperior;SUPERSCRIPT LATIN SMALL LETTER L
+2591;ltshade;LIGHT SHADE
+006D;m;LATIN SMALL LETTER M
+00AF;macron;MACRON
+02C9;macron;MODIFIER LETTER MACRON;Duplicate
+2642;male;MALE SIGN
+2212;minus;MINUS SIGN
+2032;minute;PRIME
+F6EF;msuperior;SUPERSCRIPT LATIN SMALL LETTER M
+00B5;mu;MICRO SIGN
+03BC;mu;GREEK SMALL LETTER MU;Duplicate
+00D7;multiply;MULTIPLICATION SIGN
+266A;musicalnote;EIGHTH NOTE
+266B;musicalnotedbl;BEAMED EIGHTH NOTES
+006E;n;LATIN SMALL LETTER N
+0144;nacute;LATIN SMALL LETTER N WITH ACUTE
+0149;napostrophe;LATIN SMALL LETTER N PRECEDED BY APOSTROPHE
+0148;ncaron;LATIN SMALL LETTER N WITH CARON
+0146;ncommaaccent;LATIN SMALL LETTER N WITH CEDILLA
+0039;nine;DIGIT NINE
+2089;nineinferior;SUBSCRIPT NINE
+F739;nineoldstyle;OLDSTYLE DIGIT NINE
+2079;ninesuperior;SUPERSCRIPT NINE
+2209;notelement;NOT AN ELEMENT OF
+2260;notequal;NOT EQUAL TO
+2284;notsubset;NOT A SUBSET OF
+207F;nsuperior;SUPERSCRIPT LATIN SMALL LETTER N
+00F1;ntilde;LATIN SMALL LETTER N WITH TILDE
+03BD;nu;GREEK SMALL LETTER NU
+0023;numbersign;NUMBER SIGN
+006F;o;LATIN SMALL LETTER O
+00F3;oacute;LATIN SMALL LETTER O WITH ACUTE
+014F;obreve;LATIN SMALL LETTER O WITH BREVE
+00F4;ocircumflex;LATIN SMALL LETTER O WITH CIRCUMFLEX
+00F6;odieresis;LATIN SMALL LETTER O WITH DIAERESIS
+0153;oe;LATIN SMALL LIGATURE OE
+02DB;ogonek;OGONEK
+00F2;ograve;LATIN SMALL LETTER O WITH GRAVE
+01A1;ohorn;LATIN SMALL LETTER O WITH HORN
+0151;ohungarumlaut;LATIN SMALL LETTER O WITH DOUBLE ACUTE
+014D;omacron;LATIN SMALL LETTER O WITH MACRON
+03C9;omega;GREEK SMALL LETTER OMEGA
+03D6;omega1;GREEK PI SYMBOL
+03CE;omegatonos;GREEK SMALL LETTER OMEGA WITH TONOS
+03BF;omicron;GREEK SMALL LETTER OMICRON
+03CC;omicrontonos;GREEK SMALL LETTER OMICRON WITH TONOS
+0031;one;DIGIT ONE
+2024;onedotenleader;ONE DOT LEADER
+215B;oneeighth;VULGAR FRACTION ONE EIGHTH
+F6DC;onefitted;PROPORTIONAL DIGIT ONE
+00BD;onehalf;VULGAR FRACTION ONE HALF
+2081;oneinferior;SUBSCRIPT ONE
+F731;oneoldstyle;OLDSTYLE DIGIT ONE
+00BC;onequarter;VULGAR FRACTION ONE QUARTER
+00B9;onesuperior;SUPERSCRIPT ONE
+2153;onethird;VULGAR FRACTION ONE THIRD
+25E6;openbullet;WHITE BULLET
+00AA;ordfeminine;FEMININE ORDINAL INDICATOR
+00BA;ordmasculine;MASCULINE ORDINAL INDICATOR
+221F;orthogonal;RIGHT ANGLE
+00F8;oslash;LATIN SMALL LETTER O WITH STROKE
+01FF;oslashacute;LATIN SMALL LETTER O WITH STROKE AND ACUTE
+F6F0;osuperior;SUPERSCRIPT LATIN SMALL LETTER O
+00F5;otilde;LATIN SMALL LETTER O WITH TILDE
+0070;p;LATIN SMALL LETTER P
+00B6;paragraph;PILCROW SIGN
+0028;parenleft;LEFT PARENTHESIS
+F8ED;parenleftbt;LEFT PAREN BOTTOM
+F8EC;parenleftex;LEFT PAREN EXTENDER
+208D;parenleftinferior;SUBSCRIPT LEFT PARENTHESIS
+207D;parenleftsuperior;SUPERSCRIPT LEFT PARENTHESIS
+F8EB;parenlefttp;LEFT PAREN TOP
+0029;parenright;RIGHT PARENTHESIS
+F8F8;parenrightbt;RIGHT PAREN BOTTOM
+F8F7;parenrightex;RIGHT PAREN EXTENDER
+208E;parenrightinferior;SUBSCRIPT RIGHT PARENTHESIS
+207E;parenrightsuperior;SUPERSCRIPT RIGHT PARENTHESIS
+F8F6;parenrighttp;RIGHT PAREN TOP
+2202;partialdiff;PARTIAL DIFFERENTIAL
+0025;percent;PERCENT SIGN
+002E;period;FULL STOP
+00B7;periodcentered;MIDDLE DOT
+2219;periodcentered;BULLET OPERATOR;Duplicate
+F6E7;periodinferior;SUBSCRIPT FULL STOP
+F6E8;periodsuperior;SUPERSCRIPT FULL STOP
+22A5;perpendicular;UP TACK
+2030;perthousand;PER MILLE SIGN
+20A7;peseta;PESETA SIGN
+03C6;phi;GREEK SMALL LETTER PHI
+03D5;phi1;GREEK PHI SYMBOL
+03C0;pi;GREEK SMALL LETTER PI
+002B;plus;PLUS SIGN
+00B1;plusminus;PLUS-MINUS SIGN
+211E;prescription;PRESCRIPTION TAKE
+220F;product;N-ARY PRODUCT
+2282;propersubset;SUBSET OF
+2283;propersuperset;SUPERSET OF
+221D;proportional;PROPORTIONAL TO
+03C8;psi;GREEK SMALL LETTER PSI
+0071;q;LATIN SMALL LETTER Q
+003F;question;QUESTION MARK
+00BF;questiondown;INVERTED QUESTION MARK
+F7BF;questiondownsmall;SMALL CAPITAL INVERTED QUESTION MARK
+F73F;questionsmall;SMALL CAPITAL QUESTION MARK
+0022;quotedbl;QUOTATION MARK
+201E;quotedblbase;DOUBLE LOW-9 QUOTATION MARK
+201C;quotedblleft;LEFT DOUBLE QUOTATION MARK
+201D;quotedblright;RIGHT DOUBLE QUOTATION MARK
+2018;quoteleft;LEFT SINGLE QUOTATION MARK
+201B;quotereversed;SINGLE HIGH-REVERSED-9 QUOTATION MARK
+2019;quoteright;RIGHT SINGLE QUOTATION MARK
+201A;quotesinglbase;SINGLE LOW-9 QUOTATION MARK
+0027;quotesingle;APOSTROPHE
+0072;r;LATIN SMALL LETTER R
+0155;racute;LATIN SMALL LETTER R WITH ACUTE
+221A;radical;SQUARE ROOT
+F8E5;radicalex;RADICAL EXTENDER
+0159;rcaron;LATIN SMALL LETTER R WITH CARON
+0157;rcommaaccent;LATIN SMALL LETTER R WITH CEDILLA
+2286;reflexsubset;SUBSET OF OR EQUAL TO
+2287;reflexsuperset;SUPERSET OF OR EQUAL TO
+00AE;registered;REGISTERED SIGN
+F8E8;registersans;REGISTERED SIGN SANS SERIF
+F6DA;registerserif;REGISTERED SIGN SERIF
+2310;revlogicalnot;REVERSED NOT SIGN
+03C1;rho;GREEK SMALL LETTER RHO
+02DA;ring;RING ABOVE
+F6F1;rsuperior;SUPERSCRIPT LATIN SMALL LETTER R
+2590;rtblock;RIGHT HALF BLOCK
+F6DD;rupiah;RUPIAH SIGN
+0073;s;LATIN SMALL LETTER S
+015B;sacute;LATIN SMALL LETTER S WITH ACUTE
+0161;scaron;LATIN SMALL LETTER S WITH CARON
+015F;scedilla;LATIN SMALL LETTER S WITH CEDILLA
+F6C2;scedilla;LATIN SMALL LETTER S WITH CEDILLA;Duplicate
+015D;scircumflex;LATIN SMALL LETTER S WITH CIRCUMFLEX
+0219;scommaaccent;LATIN SMALL LETTER S WITH COMMA BELOW
+2033;second;DOUBLE PRIME
+00A7;section;SECTION SIGN
+003B;semicolon;SEMICOLON
+0037;seven;DIGIT SEVEN
+215E;seveneighths;VULGAR FRACTION SEVEN EIGHTHS
+2087;seveninferior;SUBSCRIPT SEVEN
+F737;sevenoldstyle;OLDSTYLE DIGIT SEVEN
+2077;sevensuperior;SUPERSCRIPT SEVEN
+2592;shade;MEDIUM SHADE
+03C3;sigma;GREEK SMALL LETTER SIGMA
+03C2;sigma1;GREEK SMALL LETTER FINAL SIGMA
+223C;similar;TILDE OPERATOR
+0036;six;DIGIT SIX
+2086;sixinferior;SUBSCRIPT SIX
+F736;sixoldstyle;OLDSTYLE DIGIT SIX
+2076;sixsuperior;SUPERSCRIPT SIX
+002F;slash;SOLIDUS
+263A;smileface;WHITE SMILING FACE
+0020;space;SPACE
+00A0;space;NO-BREAK SPACE;Duplicate
+2660;spade;BLACK SPADE SUIT
+F6F2;ssuperior;SUPERSCRIPT LATIN SMALL LETTER S
+00A3;sterling;POUND SIGN
+220B;suchthat;CONTAINS AS MEMBER
+2211;summation;N-ARY SUMMATION
+263C;sun;WHITE SUN WITH RAYS
+0074;t;LATIN SMALL LETTER T
+03C4;tau;GREEK SMALL LETTER TAU
+0167;tbar;LATIN SMALL LETTER T WITH STROKE
+0165;tcaron;LATIN SMALL LETTER T WITH CARON
+0163;tcommaaccent;LATIN SMALL LETTER T WITH CEDILLA
+021B;tcommaaccent;LATIN SMALL LETTER T WITH COMMA BELOW;Duplicate
+2234;therefore;THEREFORE
+03B8;theta;GREEK SMALL LETTER THETA
+03D1;theta1;GREEK THETA SYMBOL
+00FE;thorn;LATIN SMALL LETTER THORN
+0033;three;DIGIT THREE
+215C;threeeighths;VULGAR FRACTION THREE EIGHTHS
+2083;threeinferior;SUBSCRIPT THREE
+F733;threeoldstyle;OLDSTYLE DIGIT THREE
+00BE;threequarters;VULGAR FRACTION THREE QUARTERS
+F6DE;threequartersemdash;THREE QUARTERS EM DASH
+00B3;threesuperior;SUPERSCRIPT THREE
+02DC;tilde;SMALL TILDE
+0303;tildecomb;COMBINING TILDE
+0384;tonos;GREEK TONOS
+2122;trademark;TRADE MARK SIGN
+F8EA;trademarksans;TRADE MARK SIGN SANS SERIF
+F6DB;trademarkserif;TRADE MARK SIGN SERIF
+25BC;triagdn;BLACK DOWN-POINTING TRIANGLE
+25C4;triaglf;BLACK LEFT-POINTING POINTER
+25BA;triagrt;BLACK RIGHT-POINTING POINTER
+25B2;triagup;BLACK UP-POINTING TRIANGLE
+F6F3;tsuperior;SUPERSCRIPT LATIN SMALL LETTER T
+0032;two;DIGIT TWO
+2025;twodotenleader;TWO DOT LEADER
+2082;twoinferior;SUBSCRIPT TWO
+F732;twooldstyle;OLDSTYLE DIGIT TWO
+00B2;twosuperior;SUPERSCRIPT TWO
+2154;twothirds;VULGAR FRACTION TWO THIRDS
+0075;u;LATIN SMALL LETTER U
+00FA;uacute;LATIN SMALL LETTER U WITH ACUTE
+016D;ubreve;LATIN SMALL LETTER U WITH BREVE
+00FB;ucircumflex;LATIN SMALL LETTER U WITH CIRCUMFLEX
+00FC;udieresis;LATIN SMALL LETTER U WITH DIAERESIS
+00F9;ugrave;LATIN SMALL LETTER U WITH GRAVE
+01B0;uhorn;LATIN SMALL LETTER U WITH HORN
+0171;uhungarumlaut;LATIN SMALL LETTER U WITH DOUBLE ACUTE
+016B;umacron;LATIN SMALL LETTER U WITH MACRON
+005F;underscore;LOW LINE
+2017;underscoredbl;DOUBLE LOW LINE
+222A;union;UNION
+2200;universal;FOR ALL
+0173;uogonek;LATIN SMALL LETTER U WITH OGONEK
+2580;upblock;UPPER HALF BLOCK
+03C5;upsilon;GREEK SMALL LETTER UPSILON
+03CB;upsilondieresis;GREEK SMALL LETTER UPSILON WITH DIALYTIKA
+03B0;upsilondieresistonos;GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
+03CD;upsilontonos;GREEK SMALL LETTER UPSILON WITH TONOS
+016F;uring;LATIN SMALL LETTER U WITH RING ABOVE
+0169;utilde;LATIN SMALL LETTER U WITH TILDE
+0076;v;LATIN SMALL LETTER V
+0077;w;LATIN SMALL LETTER W
+1E83;wacute;LATIN SMALL LETTER W WITH ACUTE
+0175;wcircumflex;LATIN SMALL LETTER W WITH CIRCUMFLEX
+1E85;wdieresis;LATIN SMALL LETTER W WITH DIAERESIS
+2118;weierstrass;SCRIPT CAPITAL P
+1E81;wgrave;LATIN SMALL LETTER W WITH GRAVE
+0078;x;LATIN SMALL LETTER X
+03BE;xi;GREEK SMALL LETTER XI
+0079;y;LATIN SMALL LETTER Y
+00FD;yacute;LATIN SMALL LETTER Y WITH ACUTE
+0177;ycircumflex;LATIN SMALL LETTER Y WITH CIRCUMFLEX
+00FF;ydieresis;LATIN SMALL LETTER Y WITH DIAERESIS
+00A5;yen;YEN SIGN
+1EF3;ygrave;LATIN SMALL LETTER Y WITH GRAVE
+007A;z;LATIN SMALL LETTER Z
+017A;zacute;LATIN SMALL LETTER Z WITH ACUTE
+017E;zcaron;LATIN SMALL LETTER Z WITH CARON
+017C;zdotaccent;LATIN SMALL LETTER Z WITH DOT ABOVE
+0030;zero;DIGIT ZERO
+2080;zeroinferior;SUBSCRIPT ZERO
+F730;zerooldstyle;OLDSTYLE DIGIT ZERO
+2070;zerosuperior;SUPERSCRIPT ZERO
+03B6;zeta;GREEK SMALL LETTER ZETA
+"""
+
+
+t1_bias    = 0
+glyph_list = []
+
+
+def the_adobe_glyph_list():
+  """return the list of glyph names in the adobe list"""
+
+  lines  = string.split( adobe_glyph_list, '\n' )
+  glyphs = []
+
+  for line in lines:
+    if line:
+      fields = string.split( line, ';' )
+#     print fields[0] + ' - ' + fields[1]
+      glyphs.append( fields[1] )
+
+  return glyphs
+
+
+def the_adobe_glyphs():
+  """return the list of unicode values"""
+
+  lines  = string.split( adobe_glyph_list, '\n' )
+  glyphs = []
+  values = []
+
+  for line in lines:
+    if line:
+      fields = string.split( line, ';' )
+#     print fields[0] + ' - ' + fields[1]
+      glyphs.append( fields[1] )
+      values.append( fields[0] )
+
+  return glyphs, values
+
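+# Illustrative sketch only (not used by the generator): each entry of the
+# Adobe list has the form "<unicode>;<name>;<description>", so the line
+# "044A;afii10092;CYRILLIC SMALL LETTER HARD SIGN" yields the glyph name
+# 'afii10092' with the unicode value string '044A', i.e.
+#
+#   names, values = the_adobe_glyphs()
+#   i = names.index( 'afii10092' )    # hypothetical lookup
+#   # values[i] == '044A'
+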
+
+def count_extra_glyphs( alist, filter ):
+  """return the list of glyphs in `alist' that are not in `filter'"""
+
+  extras = []
+
+  for name in alist:
+    try:
+      # `index' raises an exception when `name' is not in `filter'
+      filter.index( name )
+    except:
+      extras.append( name )
+
+  return extras
+
+
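+# write the `mac_standard_names' table, which maps each Macintosh standard
+# glyph index to an index into `standard_glyph_names': names that also
+# appear in the Type 1 standard strings reuse their (biased) T1 slot,
+# while the remaining "extra" Mac names are numbered 0, 1, 2, ... in
+# order of appearance (see main() below)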
+def dump_mac_indices( file, t1_bias ):
+  write = file.write
+
+  write( "  static const unsigned short  mac_standard_names[" + \
+        repr( len( mac_standard_names ) + 1 ) + "] =\n" )
+  write( "  {\n" )
+
+  count = 0
+  for name in mac_standard_names:
+    try:
+      t1_index = t1_standard_strings.index( name )
+      write( "    " + repr( t1_bias + t1_index ) + ",\n" )
+    except:
+      write( "    " + repr( count ) + ",\n" )
+      count = count + 1
+
+  write( "    0\n" )
+  write( "  };\n" )
+  write( "\n" )
+  write( "\n" )
+
+
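+# write the `standard_glyph_names' array: first the names in `glyph_list',
+# then, only if FT_CONFIG_OPTION_ADOBE_GLYPH_LIST is defined, the extra
+# Adobe names in `adobe_extra', followed by a terminating 0 pointer; the
+# combined name list is returned to the caller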
+def dump_glyph_list( file, glyph_list, adobe_extra ):
+  write = file.write
+
+  name_list = []
+
+  write( "  static const char* const  standard_glyph_names[] =\n" )
+  write( "  {\n" )
+
+  for name in glyph_list:
+    write( '    "' + name + '",\n' )
+    name_list.append( name )
+
+  write( "\n" )
+  write( "#ifdef FT_CONFIG_OPTION_ADOBE_GLYPH_LIST\n" )
+  write( "\n" )
+
+  for name in adobe_extra:
+    write( '    "' + name + '",\n' )
+    name_list.append( name )
+
+  write( "\n" )
+  write( "#endif /* FT_CONFIG_OPTION_ADOBE_GLYPH_LIST */\n" )
+  write( "\n" )
+  write( "    0\n" )
+  write( "  };\n" )
+  write( "\n" )
+  write( "\n" )
+
+  return name_list
+
+
+def dump_unicode_values( file, base_list, adobe_list ):
+  """build the glyph names to unicode values table"""
+
+  write = file.write
+
+  adobe_glyphs, uni_values = the_adobe_glyphs()
+
+  write( "\n" )
+  write( "  static const unsigned short  names_to_unicode[" + \
+          repr( len( base_list ) + len( adobe_list ) + 1 ) + "] =\n" )
+  write( "  {\n" )
+
+  for name in base_list:
+    try:
+      index = adobe_glyphs.index( name )
+      write( "    0x" + uni_values[index] + ",\n" )
+    except:
+      write( "    0,\n" )
+
+  write( "\n" )
+  write( "#ifdef FT_CONFIG_OPTION_ADOBE_GLYPH_LIST\n" )
+  write( "\n" )
+
+  for name in adobe_list:
+    try:
+      index = adobe_glyphs.index( name )
+      write( "    0x" + uni_values[index] + ",\n" )
+    except:
+      write( "    0,\n" )
+
+  write( "\n" )
+  write( "#endif /* FT_CONFIG_OPTION_ADOBE_GLYPH_LIST */\n" )
+  write( "    0\n" )
+  write( "  };\n" )
+  write( "\n" )
+  write( "\n" )
+  write( "\n" )
+
+
+def dump_encoding( file, encoding_name, encoding_list ):
+  """dumps a given encoding"""
+
+  write = file.write
+
+  write( "  static const unsigned short  " + encoding_name + "[" + \
+          repr( len( encoding_list ) + 1 ) + "] =\n" )
+  write( "  {\n" )
+
+  for value in encoding_list:
+    write( "    " + repr( value ) + ",\n" )
+  write( "    0\n" )
+  write( "  };\n" )
+  write( "\n" )
+  write( "\n" )
+
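+# For illustration, a call such as
+#
+#   dump_encoding( file, "t1_standard_encoding", t1_standard_encoding )
+#
+# emits C source of the form
+#
+#   static const unsigned short  t1_standard_encoding[<N + 1>] =
+#   {
+#     <value 0>,
+#     ...
+#     0
+#   };
+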
+
+def main():
+  """main program body"""
+
+  if len( sys.argv ) != 2:
+    print __doc__ % sys.argv[0]
+    sys.exit( 1 )
+
+  file  = open( sys.argv[1], "w" )
+  write = file.write
+
+  count_sid = len( t1_standard_strings )
+
+  # build mac index table & supplemental glyph names
+  mac_list   = count_extra_glyphs( mac_standard_names, t1_standard_strings )
+  count_mac  = len( mac_list )
+  t1_bias    = count_mac
+  base_list  = mac_list + t1_standard_strings
+
+  # build adobe unicode index table & supplemental glyph names
+  adobe_list  = the_adobe_glyph_list()
+  adobe_list  = count_extra_glyphs( adobe_list, base_list )
+  count_adobe = len( adobe_list )
+
+  write( "/***************************************************************************/\n" )
+  write( "/*                                                                         */\n" )
+
+  write( "/*  %-71s*/\n" % sys.argv[1] )
+
+  write( "/*                                                                         */\n" )
+  write( "/*    PostScript glyph names (specification only).                         */\n" )
+  write( "/*                                                                         */\n" )
+  write( "/*  Copyright 2000 by                                                      */\n" )
+  write( "/*  David Turner, Robert Wilhelm, and Werner Lemberg.                      */\n" )
+  write( "/*                                                                         */\n" )
+  write( "/*  This file is part of the FreeType project, and may only be used,       */\n" )
+  write( "/*  modified, and distributed under the terms of the FreeType project      */\n" )
+  write( "/*  license, LICENSE.TXT.  By continuing to use, modify, or distribute     */\n" )
+  write( "/*  this file you indicate that you have read the license and              */\n" )
+  write( "/*  understand and accept it fully.                                        */\n" )
+  write( "/*                                                                         */\n" )
+  write( "/***************************************************************************/\n" )
+  write( "\n" )
+  write( "\n" )
+  write( "  /* this file has been generated automatically -- do not edit! */\n" )
+  write( "\n" )
+  write( "\n" )
+
+  # dump glyph list
+  name_list = dump_glyph_list( file, base_list, adobe_list )
+
+  # dump t1_standard_list
+  write( "  static const char* const * const  t1_standard_glyphs = " \
+          + "standard_glyph_names + " + repr( t1_bias ) + ";\n" )
+  write( "\n" )
+  write( "\n" )
+
+  write( "#define NUM_STD_GLYPHS " + repr( len( t1_standard_strings ) ) + "\n" )
+  write( "\n" )
+  write( "#ifdef FT_CONFIG_OPTION_ADOBE_GLYPH_LIST\n" )
+  write( "#define NUM_ADOBE_GLYPHS " + \
+          repr( len( base_list ) + len( adobe_list ) - t1_bias ) + "\n" )
+  write( "#else\n" )
+  write( "#define NUM_ADOBE_GLYPHS " + \
+          repr( len( base_list ) - t1_bias )  + "\n" )
+  write( "#endif\n" )
+  write( "\n" )
+  write( "\n" )
+
+  # dump mac indices table
+  dump_mac_indices( file, t1_bias )
+
+  # discard mac names from base list
+  base_list = base_list[t1_bias:]
+
+  # dump unicode values table
+  dump_unicode_values( file, base_list, adobe_list )
+
+  dump_encoding( file, "t1_standard_encoding", t1_standard_encoding )
+  dump_encoding( file, "t1_expert_encoding", t1_expert_encoding )
+
+  write( "/* END */\n" )
+
+
+# Now run the main routine
+#
+main()
+
+
+# END
--- /dev/null
+++ b/src/tools/test_bbox.c
@@ -1,0 +1,160 @@
+#include <ft2build.h>
+#include FT_FREETYPE_H
+#include FT_BBOX_H
+
+
+#include <stdio.h>   /* for printf() */
+#include <time.h>    /* for clock()  */
+
+/* SunOS 4.1.* does not define CLOCKS_PER_SEC, so include <sys/param.h> */
+/* to get the HZ macro which is the equivalent.                         */
+#if defined(__sun__) && !defined(SVR4) && !defined(__SVR4)
+#include <sys/param.h>
+#define CLOCKS_PER_SEC HZ
+#endif
+
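+  /* return the current CPU time, in 1/10000ths of a second */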
+  static long
+  get_time( void )
+  {
+    return clock() * 10000L / CLOCKS_PER_SEC;
+  }
+
+
+
+
+  /* test bbox computations */
+  
+#define  XSCALE    65536
+#define  XX(x)     ((FT_Pos)(x*XSCALE))
+#define  XVEC(x,y)  { XX(x), XX(y) }
+#define  XVAL(x)   ((x)/(1.0*XSCALE))
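+
+  /* the outlines below are expressed in 16.16 fixed-point: XX() scales */
+  /* a floating-point coordinate by 65536 (e.g. XX( 408.9111 ) is       */
+  /* 0x0198E93D, see the alternative initializers below), and XVAL()    */
+  /* converts a coordinate back to floating point for printing          */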
+  
+  /* dummy outline #1 */
+  static FT_Vector  dummy_vec_1[4] =
+  {
+#if 1
+    XVEC( 408.9111, 535.3164 ),
+    XVEC( 455.8887, 634.396  ),
+    XVEC( -37.8765, 786.2207 ),
+    XVEC( 164.6074, 535.3164 )
+#else    
+    { (FT_Int32)0x0198E93DL , (FT_Int32)0x021750FFL },  /* 408.9111, 535.3164 */
+    { (FT_Int32)0x01C7E312L , (FT_Int32)0x027A6560L },  /* 455.8887, 634.3960 */
+    { (FT_Int32)0xFFDA1F9EL , (FT_Int32)0x0312387FL },  /* -37.8765, 786.2207 */
+    { (FT_Int32)0x00A49B7EL , (FT_Int32)0x021750FFL }   /* 164.6074, 535.3164 */
+#endif    
+   };
+  
+  static char  dummy_tag_1[4] =
+  {
+    FT_Curve_Tag_On,
+    FT_Curve_Tag_Cubic,
+    FT_Curve_Tag_Cubic,
+    FT_Curve_Tag_On
+  };
+
+  static short  dummy_contour_1[1] =
+  {
+    3
+  };
+  
+  static FT_Outline  dummy_outline_1 =
+  {
+    1,
+    4,
+    dummy_vec_1,
+    dummy_tag_1,
+    dummy_contour_1,
+    0
+  };
+
+
+  /* dummy outline #2 */
+  static FT_Vector  dummy_vec_2[4] =
+  {
+    XVEC( 100.0, 100.0 ),
+    XVEC( 100.0, 200.0 ),
+    XVEC( 200.0, 200.0 ),
+    XVEC( 200.0, 133.0 )
+  };
+  
+  static FT_Outline  dummy_outline_2 =
+  {
+    1,
+    4,
+    dummy_vec_2,
+    dummy_tag_1,
+    dummy_contour_1,
+    0
+  };
+
+
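+  /* print an outline's control box (computed from all points,         */
+  /* including Bezier control points) and its exact bounding box       */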
+  static void
+  dump_outline( FT_Outline*  outline )
+  {
+    FT_BBox  bbox;
+    
+    /* compute and display cbox */
+    FT_Outline_Get_CBox( outline, &bbox );
+    printf( "cbox = [%.2f %.2f %.2f %.2f]\n",
+             XVAL( bbox.xMin ),
+             XVAL( bbox.yMin ),
+             XVAL( bbox.xMax ),
+             XVAL( bbox.yMax ) );
+
+    /* compute and display bbox */
+    FT_Outline_Get_BBox( outline, &bbox );
+    printf( "bbox = [%.2f %.2f %.2f %.2f]\n",
+             XVAL( bbox.xMin ),
+             XVAL( bbox.yMin ),
+             XVAL( bbox.xMax ),
+             XVAL( bbox.yMax ) );
+  }
+
+
+
+  static void
+  profile_outline( FT_Outline*   outline,
+                   long          repeat )
+  {
+    FT_BBox  bbox;
+    long     count;
+    long     time0;
+    
+    time0 = get_time();
+    for ( count = repeat; count > 0; count-- )
+      FT_Outline_Get_CBox( outline, &bbox );
+      
+    time0 = get_time() - time0;      
+    printf( "time = %5.2f cbox = [%.2f %.2f %.2f %.2f]\n",
+             ((double)time0/10000.0),
+             XVAL( bbox.xMin ),
+             XVAL( bbox.yMin ),
+             XVAL( bbox.xMax ),
+             XVAL( bbox.yMax ) );
+
+
+    time0 = get_time();
+    for ( count = repeat; count > 0; count-- )
+      FT_Outline_Get_BBox( outline, &bbox );
+    
+    time0 = get_time() - time0;
+    printf( "time = %5.2f bbox = [%.2f %.2f %.2f %.2f]\n",
+             ((double)time0/10000.0),
+             XVAL( bbox.xMin ),
+             XVAL( bbox.yMin ),
+             XVAL( bbox.xMax ),
+             XVAL( bbox.yMax ) );
+  }
+
+#define REPEAT  100000L
+
+  int  main( int  argc, char**  argv )
+  {
+    printf( "outline #1\n" );
+    profile_outline( &dummy_outline_1, REPEAT );
+
+    printf( "outline #2\n" );
+    profile_outline( &dummy_outline_2, REPEAT );
+    return 0;
+  }
+
--- /dev/null
+++ b/src/tools/test_trig.c
@@ -1,0 +1,236 @@
+#include <ft2build.h>
+#include FT_FREETYPE_H
+#include FT_TRIGONOMETRY_H
+
+#include <math.h>
+#include <stdio.h>
+
+#define  PI   3.14159265358979323846
+#define  SPI  (PI/FT_ANGLE_PI)
+
+/* the precision of the checks, in 16.16 fixed-point units; expect */
+/* between 2 and 5 noisy least-significant bits during operations, */
+/* due to rounding errors                                          */
+#define  THRESHOLD  64
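+
+/* For scale: THRESHOLD/65536 is roughly 0.001, i.e. results may   */
+/* differ from the C library values by about one thousandth.  SPI  */
+/* converts an FT_Angle to radians; assuming FT_ANGLE_PI is the    */
+/* 16.16 encoding of 180 degrees, the 0x10000 step used in the     */
+/* loops below corresponds to exactly one degree.                  */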
+
+  static int  error = 0;
+
+  static void
+  test_cos( void )
+  {
+    FT_Fixed  f1, f2;
+    double    d1, d2;
+    int       i;
+    
+    for ( i = 0; i < FT_ANGLE_2PI; i += 0x10000 )
+    {
+      f1 = FT_Cos(i);
+      d1 = f1/65536.0;
+      d2 = cos( i*SPI );
+      f2 = (FT_Fixed)(d2*65536.0);
+
+      if ( abs( f2-f1 ) > THRESHOLD )
+      {
+        error = 1;
+        printf( "FT_Cos[%3d] = %.7f  cos[%3d] = %.7f\n",
+                (i >> 16), f1/65536.0, (i >> 16), d2 );
+      }
+    }
+  }
+
+
+
+  static void
+  test_sin( void )
+  {
+    FT_Fixed  f1, f2;
+    double    d1, d2;
+    int       i;
+    
+    for ( i = 0; i < FT_ANGLE_2PI; i += 0x10000 )
+    {
+      f1 = FT_Sin(i);
+      d1 = f1/65536.0;
+      d2 = sin( i*SPI );
+      f2 = (FT_Fixed)(d2*65536.0);
+
+      if ( abs( f2-f1 ) > THRESHOLD )
+      {
+        error = 1;
+        printf( "FT_Sin[%3d] = %.7f  sin[%3d] = %.7f\n",
+                (i >> 16), f1/65536.0, (i >> 16), d2 );
+      }
+    }
+  }
+
+
+  static void
+  test_tan( void )
+  {
+    FT_Fixed  f1, f2;
+    double    d1, d2;
+    int       i;
+    
+    for ( i = 0; i < FT_ANGLE_PI2-0x2000000; i += 0x10000 )
+    {
+      f1 = FT_Tan(i);
+      d1 = f1/65536.0;
+      d2 = tan( i*SPI );
+      f2 = (FT_Fixed)(d2*65536.0);
+
+      if ( abs( f2-f1 ) > THRESHOLD )
+      {
+        error = 1;
+        printf( "FT_Tan[%3d] = %.7f  tan[%3d] = %.7f\n",
+                (i >> 16), f1/65536.0, (i >> 16), d2 );
+      }
+    }
+  }
+
+
+  static void
+  test_atan2( void )
+  {
+    FT_Fixed  c2, s2;
+    double    l, a, c1, s1;
+    int       i, j;
+    
+    for ( i = 0; i < FT_ANGLE_2PI; i += 0x10000 )
+    {
+      l  = 5.0;
+      a  = i*SPI;
+      
+      c1 = l * cos(a);
+      s1 = l * sin(a);
+      
+      c2 = (FT_Fixed)(c1*65536.0);
+      s2 = (FT_Fixed)(s1*65536.0);
+      
+      j  = FT_Atan2( c2, s2 );
+      if ( j < 0 )
+        j += FT_ANGLE_2PI;
+      
+      if ( abs( i - j ) > 1 )
+      {
+        printf( "FT_Atan2( %.7f, %.7f ) = %.5f, atan = %.5f\n",
+                c2/65536.0, s2/65536.0, j/65536.0, i/65536.0 );
+      }
+    }
+  }
+  
+  static void
+  test_unit( void )
+  {
+    FT_Vector  v;
+    double     a, c1, s1;
+    FT_Fixed   c2, s2;
+    int        i;
+    
+    for ( i = 0; i < FT_ANGLE_2PI; i += 0x10000 )
+    {
+      FT_Vector_Unit( &v, i );
+      a  = ( i*SPI );
+      c1 = cos(a);
+      s1 = sin(a);
+      c2 = (FT_Fixed)(c1*65536.0);
+      s2 = (FT_Fixed)(s1*65536.0);
+      
+      if ( abs( v.x-c2 ) > THRESHOLD ||
+           abs( v.y-s2 ) > THRESHOLD )
+      {
+        error = 1;
+        printf( "FT_Vector_Unit[%3d] = ( %.7f, %.7f )  vec = ( %.7f, %.7f )\n",
+                (i >> 16),
+                v.x/65536.0, v.y/65536.0,
+                c1, s1 );
+      }
+    }
+  }
+  
+
+  static void
+  test_length( void )
+  {
+    FT_Vector  v;
+    FT_Fixed   l, l2;
+    int        i;
+    
+    for ( i = 0; i < FT_ANGLE_2PI; i += 0x10000 )
+    {
+      l   = (FT_Fixed)(500.0*65536.0);
+      v.x = (FT_Fixed)( l * cos( i*SPI ) );
+      v.y = (FT_Fixed)( l * sin( i*SPI ) );
+      l2  = FT_Vector_Length( &v );
+      
+      if ( abs( l2-l ) > THRESHOLD )
+      {
+        error = 1;
+        printf( "FT_Length( %.7f, %.7f ) = %.5f, length = %.5f\n",
+                v.x/65536.0, v.y/65536.0, l2/65536.0, l/65536.0 );
+      }
+    }
+  }
+
+
+  static void
+  test_rotate( void )
+  {
+    FT_Fixed  c2, s2, c4, s4;
+    FT_Vector v;
+    double    l, ra, a, c1, s1, cra, sra, c3, s3;
+    int       i, j, rotate;
+    
+    for ( rotate = 0; rotate < FT_ANGLE_2PI; rotate += 0x10000 )
+    {
+      ra  = rotate*SPI;
+      cra = cos( ra );
+      sra = sin( ra );
+      
+      for ( i = 0; i < FT_ANGLE_2PI; i += 0x10000 )
+      {
+        l  = 500.0;
+        a  = i*SPI;
+        
+        c1 = l * cos(a);
+        s1 = l * sin(a);
+        
+        v.x = c2 = (FT_Fixed)(c1*65536.0);
+        v.y = s2 = (FT_Fixed)(s1*65536.0);
+        
+        FT_Vector_Rotate( &v, rotate );
+        
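+        /* reference result, from the standard 2D rotation formula:  */
+        /*   c3 = c1 * cos(ra) - s1 * sin(ra)                        */
+        /*   s3 = c1 * sin(ra) + s1 * cos(ra)                        */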
+        c3 = c1 * cra - s1 * sra;
+        s3 = c1 * sra + s1 * cra;
+        
+        c4 = (FT_Fixed)(c3*65536.0);
+        s4 = (FT_Fixed)(s3*65536.0);
+        
+        if ( abs( c4 - v.x ) > THRESHOLD ||
+             abs( s4 - v.y ) > THRESHOLD )
+        {
+          error = 1;
+          printf( "FT_Rotate( (%.7f,%.7f), %.5f ) = ( %.7f, %.7f ), rot = ( %.7f, %.7f )\n",
+                  c1, s1, ra,
+                  c2/65536.0, s2/65536.0,
+                  c4/65536.0, s4/65536.0 );
+        }
+      }
+    }
+  }
+
+  
+  int main( void )
+  {
+    test_cos();
+    test_sin();
+    test_tan();
+    test_atan2();
+    test_unit();
+    test_length();
+    test_rotate();
+        
+    if (!error)
+      printf( "trigonometry test ok !\n" );
+      
+    return !error;
+  }