shithub: freetype+ttf2subf

Download patch

ref: aa8c7da0bfe3a4e93ac380478fbad7d35c0be0d0
parent: d0c36e3b5a1087eff8b88b3bac1106e3cd46844c
author: Werner Lemberg <[email protected]>
date: Mon Jul 3 23:37:18 EDT 2000

Don't use -lefence in the demo Makefile.

Added C++ guards in ftmodule.h

Fix error check in ftglyph.c

Formatting; adding copyrights; fixing documentation

git/fs: mount .git/fs: mount/attach disallowed
--- a/demos/Makefile
+++ b/demos/Makefile
@@ -87,7 +87,7 @@
   # with the program by default on Unix, we thus add it whenever appropriate
   #
   ifeq ($(PLATFORM),unix)
-  LINK += -lm -lefence
+  LINK += -lm
   endif
 
   COMMON_LINK = $(LINK) $(COMMON_OBJ)
--- a/include/freetype/ftmodule.h
+++ b/include/freetype/ftmodule.h
@@ -20,6 +20,11 @@
 
 #include <freetype/freetype.h>
 
+#ifdef __cplusplus
+  extern "C" {
+#endif
+
+
   /* module bit flags */
   typedef enum FT_Module_Flags_
   {
@@ -277,6 +282,10 @@
   /*                                                                       */
   FT_EXPORT_DEF(void)  FT_Add_Default_Modules( FT_Library  library );
 
+
+#ifdef __cplusplus
+  }
+#endif
 
 
 #endif /* FTMODULE_H */
--- a/src/base/ftglyph.c
+++ b/src/base/ftglyph.c
@@ -488,7 +488,7 @@
     if ( !slot )
       return FT_Err_Invalid_Slot_Handle;
 
-    if ( !glyph )
+    if ( !aglyph )
       return FT_Err_Invalid_Argument;
 
     /* if it is a bitmap, that's easy :-) */
@@ -561,6 +561,7 @@
   /* <Note>                                                                */
   /*    The 2x2 transformation matrix is also applied to the glyph's       */
   /*    advance vector.                                                    */
+  /*                                                                       */
   FT_EXPORT_FUNC( FT_Error )  FT_Glyph_Transform( FT_Glyph    glyph,
                                                   FT_Matrix*  matrix,
                                                   FT_Vector*  delta )
--- a/src/base/ftstream.c
+++ b/src/base/ftstream.c
@@ -542,17 +542,17 @@
       case ft_frame_bytes:  /* read a byte sequence */
         {
           FT_Int  len = stream->limit - stream->cursor;
-          
-          if (len > fields->size)
+
+
+          if ( len > fields->size )
             len = fields->size;
 
           p = (FT_Byte*)structure + fields->offset;
           MEM_Copy( p, stream->cursor, len );
-          stream->cursor += len;  
+          stream->cursor += len;
           fields++;
           continue;
         }
-        
 
       case ft_frame_byte:
       case ft_frame_schar:  /* read a single byte */
--- a/src/sfnt/ttload.c
+++ b/src/sfnt/ttload.c
@@ -1348,12 +1348,6 @@
       FT_FRAME_END
     };
 
-    static const FT_Frame_Field  pclt_fields2[] =
-    {
-      FT_FRAME_START( 4 ),
-      FT_FRAME_END
-    };
-
     FT_Error  error;
     TT_PCLT*  pclt = &face->pclt;
 
--- a/src/type1/t1tokens.c
+++ b/src/type1/t1tokens.c
@@ -1,28 +1,34 @@
-/*******************************************************************
- *
- *  t1tokens.c
- *
- *  Type 1 tokenizer
- *
- *  Copyright 1996 David Turner, Robert Wilhelm and Werner Lemberg.
- *
- *  This file is part of the FreeType project, and may only be used
- *  modified and distributed under the terms of the FreeType project
- *  license, LICENSE.TXT. By continuing to use, modify or distribute
- *  this file you indicate that you have read the license and
- *  understand and accept it fully.
- *
- *
- *  The tokenizer is in charge of loading and reading a Type1 font
- *  file (either in PFB or PFA format), and extract successive tokens
- *  and keywords from its two streams (i.e. the font program, and the
- *  private dictionary).
- *
- *  Eexec decryption is performed automatically when entering the
- *  private dictionary, or when retrieving char strings..
- *
- ******************************************************************/
+/***************************************************************************/
+/*                                                                         */
+/*  t1parse.c                                                              */
+/*                                                                         */
+/*    Type 1 parser (body).                                                */
+/*                                                                         */
+/*  Copyright 1996-2000 by                                                 */
+/*  David Turner, Robert Wilhelm, and Werner Lemberg.                      */
+/*                                                                         */
+/*  This file is part of the FreeType project, and may only be used,       */
+/*  modified, and distributed under the terms of the FreeType project      */
+/*  license, LICENSE.TXT.  By continuing to use, modify, or distribute     */
+/*  this file you indicate that you have read the license and              */
+/*  understand and accept it fully.                                        */
+/*                                                                         */
+/***************************************************************************/
 
+
+  /*************************************************************************/
+  /*                                                                       */
+  /* The tokenizer is in charge of loading and reading a Type1 font file   */
+  /* (either in PFB or PFA format), and extracting successive tokens and   */
+  /* keywords from its two streams (i.e. the font program, and the private */
+  /* dictionary).                                                          */
+  /*                                                                       */
+  /* Eexec decryption is performed automatically when entering the private */
+  /* dictionary, or when retrieving char strings.                          */
+  /*                                                                       */
+  /*************************************************************************/
+
+
 #include <freetype/internal/ftstream.h>
 #include <freetype/internal/ftdebug.h>
 
@@ -29,27 +35,37 @@
 #include <t1tokens.h>
 #include <t1load.h>
 
+#include <string.h>     /* for strncmp() */
+
+
 #undef  READ_BUFFER_INCREMENT
 #define READ_BUFFER_INCREMENT  0x400
 
+
+  /*************************************************************************/
+  /*                                                                       */
+  /* The macro FT_COMPONENT is used in trace mode.  It is an implicit      */
+  /* parameter of the FT_TRACE() and FT_ERROR() macros, used to print/log  */
+  /* messages during execution.                                            */
+  /*                                                                       */
 #undef  FT_COMPONENT
 #define FT_COMPONENT  trace_t1load
 
-  /* array of Type1 keywords supported by this engine. This table places */
-  /* the keyword in lexicographical order. It should always correspond   */
-  /* to the enums key_XXXX !!                                            */
-  /*                                                                     */
-  const  char*  t1_keywords[ key_max - key_first_ ] =
+
+  /* An array of Type1 keywords supported by this engine.  This table */
+  /* places the keyword in lexicographical order.  It should always   */
+  /* correspond  to the enums `key_xxx'!                              */
+  /*                                                                  */
+  const char*  t1_keywords[key_max - key_first_] =
   {
     "-|", "ExpertEncoding", "ND", "NP", "RD", "StandardEncoding", "array",
-	"begin", "closefile", "currentdict", "currentfile", "def", "dict", "dup",
-	"eexec", "end", "executeonly", "false", "for", "index",	"noaccess",
-	"put", "readonly", "true", "userdict", "|", "|-"
+    "begin", "closefile", "currentdict", "currentfile", "def", "dict", "dup",
+    "eexec", "end", "executeonly", "false", "for", "index", "noaccess",
+    "put", "readonly", "true", "userdict", "|", "|-"
   };
 
 
-
-  const  char*  t1_immediates[ imm_max - imm_first_ ] =
+  const char*  t1_immediates[imm_max - imm_first_] =
   {
     "-|", ".notdef", "BlendAxisTypes", "BlueFuzz", "BlueScale", "BlueShift",
     "BlueValues", "CharStrings", "Encoding", "FamilyBlues", "FamilyName",
@@ -71,39 +87,46 @@
   {
     int  c2 = 0;
 
+
     for ( ; str1_len > 0; str1_len-- )
     {
-      int c1, diff;
+      int  c1, diff;
 
+
       c1 = *str1++;
       c2 = *str2++;
 
       diff = c1 - c2;
-      if (diff) return diff;
+      if ( diff )
+        return diff;
     };
+
     return -*str2;
   }
 
 
-  /* Find a given token/name, perform binary search */
+  /* find a given token/name, performing binary search */
   static
-  int  Find_Name( char*  base, int  length,
-                  const char** table, int  table_len )
+  int  Find_Name( char*         base,
+                  int           length,
+                  const char**  table,
+                  int           table_len )
   {
-    /* performs a binary search */
+    int  left, right;
 
-    FT_Int  left, right;
 
     left  = 0;
-    right = table_len-1;
+    right = table_len - 1;
 
-    while ( right-left > 1 )
+    while ( right - left > 1 )
     {
-      FT_Int  middle = left + (( right-left ) >> 1);
-      FT_Int  cmp;
+      int  middle = left + ( ( right - left ) >> 1 );
+      int  cmp;
 
+
       cmp = lexico_strcmp( base, length, table[middle] );
-      if (!cmp) return middle;
+      if ( !cmp )
+        return middle;
 
       if ( cmp < 0 )
         right = middle;
@@ -111,8 +134,10 @@
         left  = middle;
     }
 
-    if ( !lexico_strcmp( base, length, table[left ] ) ) return left;
-    if ( !lexico_strcmp( base, length, table[right] ) ) return right;
+    if ( !lexico_strcmp( base, length, table[left ] ) )
+      return left;
+    if ( !lexico_strcmp( base, length, table[right] ) )
+      return right;
 
     return -1;
   }
@@ -120,18 +145,20 @@
 
   /* read the small PFB section header */
   static
-  FT_Error  Read_PFB_Tag( FT_Stream  stream,
-                          FT_UShort* atag,
-                          FT_ULong*  asize )
+  FT_Error  Read_PFB_Tag( FT_Stream   stream,
+                          FT_UShort*  atag,
+                          FT_ULong*   asize )
   {
-    FT_UShort tag;
-    FT_ULong  size;
-    FT_Error  error;
+    FT_UShort  tag;
+    FT_ULong   size;
+    FT_Error   error;
 
-    FT_TRACE2(( "Read_PFB_Tag : reading\n" ));
 
-    if ( ACCESS_Frame( 6L ) ) return error;
+    FT_TRACE2(( "Read_PFB_Tag: reading\n" ));
 
+    if ( ACCESS_Frame( 6L ) )
+      return error;
+
     tag  = GET_UShort();
     size = GET_ULong();
 
@@ -138,10 +165,10 @@
     FORGET_Frame();
 
     *atag  = tag;
-    *asize = (  (size        & 0xFF) << 24 ) |
-             ( ((size >> 8)  & 0xFF) << 16 ) |
-             ( ((size >> 16) & 0xFF) << 8 )  |
-             ( ((size >> 24) & 0xFF) );
+    *asize = (   ( size         & 0xFF ) << 24 ) |
+             ( ( ( size >> 8  ) & 0xFF ) << 16 ) |
+             ( ( ( size >> 16 ) & 0xFF ) << 8 )  |
+             ( ( ( size >> 24 ) & 0xFF ) );
 
     FT_TRACE2(( "  tag  = %04x\n", tag    ));
     FT_TRACE4(( "  asze = %08x\n", size   ));
@@ -151,7 +178,6 @@
   }
 
 
-
   static
   FT_Error  grow( T1_Tokenizer  tokzer )
   {
@@ -159,6 +185,7 @@
     FT_Long    left_bytes;
     FT_Memory  memory = tokzer->memory;
 
+
     left_bytes = tokzer->max - tokzer->limit;
 
     if ( left_bytes > 0 )
@@ -165,6 +192,7 @@
     {
       FT_Stream stream = tokzer->stream;
 
+
       if ( left_bytes > READ_BUFFER_INCREMENT )
         left_bytes = READ_BUFFER_INCREMENT;
 
@@ -177,7 +205,7 @@
     }
     else
     {
-      FT_ERROR(( "Unexpected end of Type1 fragment !!\n" ));
+      FT_ERROR(( "Unexpected end of Type1 fragment!\n" ));
       error = T1_Err_Invalid_File_Format;
     }
 
@@ -186,8 +214,20 @@
   }
 
 
-
-
+  /*************************************************************************/
+  /*                                                                       */
+  /* <Function>                                                            */
+  /*    t1_decrypt                                                         */
+  /*                                                                       */
+  /* <Description>                                                         */
+  /*    Performs the Type 1 charstring decryption process.                 */
+  /*                                                                       */
+  /* <Input>                                                               */
+  /*    buffer  :: The base address of the data to decrypt.                */
+  /*    length  :: The number of bytes to decrypt (beginning from the base */
+  /*               address.                                                */
+  /*    seed    :: The encryption seed (4330 for charstrings).             */
+  /*                                                                       */
   LOCAL_FUNC
   void  t1_decrypt( FT_Byte*   buffer,
                     FT_Int     length,
@@ -197,8 +237,9 @@
     {
       FT_Byte  plain;
 
-      plain     = (*buffer ^ (seed >> 8));
-      seed      = (*buffer+seed)*52845+22719;
+
+      plain     = ( *buffer ^ ( seed >> 8 ) );
+      seed      = ( *buffer + seed ) * 52845 + 22719;
       *buffer++ = plain;
       length--;
     }
@@ -205,794 +246,842 @@
   }
 
 
- /*************************************************************************/
- /*                                                                       */
- /*  <Function> New_Tokenizer                                             */
- /*                                                                       */
- /*  <Description>                                                        */
- /*     Creates a new tokenizer from a given input stream. This function  */
- /*     automatically recognizes "pfa" or "pfb" files. The function       */
- /*     "Read_Token" can then be used to extract successive tokens from   */
- /*     the stream..                                                      */
- /*                                                                       */
- /*  <Input>                                                              */
- /*     stream  :: input stream                                           */
- /*                                                                       */
- /*  <Output>                                                             */
- /*     tokenizer :: handle to new tokenizer object..                     */
- /*                                                                       */
- /*  <Return>                                                             */
- /*     Type1 error code. 0 means success..                               */
- /*                                                                       */
- /*  <Note>                                                               */
- /*     This function copies the stream handle within the object. Callers */
- /*     should not discard "stream". This is done by the Done_Tokenizer   */
- /*     function..                                                        */
- /*                                                                       */
+  /*************************************************************************/
+  /*                                                                       */
+  /* <Function>                                                            */
+  /*    New_Tokenizer                                                      */
+  /*                                                                       */
+  /* <Description>                                                         */
+  /*    Creates a new tokenizer from a given input stream.  This function  */
+  /*    automatically recognizes `pfa' or `pfb' files.  The function       */
+  /*    Read_Token() can then be used to extract successive tokens from    */
+  /*    the stream.                                                        */
+  /*                                                                       */
+  /* <Input>                                                               */
+  /*    stream    :: The input stream.                                     */
+  /*                                                                       */
+  /* <Output>                                                              */
+  /*    tokenizer :: A handle to a new tokenizer object.                   */
+  /*                                                                       */
+  /* <Return>                                                              */
+  /*    FreeType error code.  0 means success.                             */
+  /*                                                                       */
+  /* <Note>                                                                */
+  /*    This function copies the stream handle within the object.  Callers */
+  /*    should not discard `stream'.  This is done by the Done_Tokenizer() */
+  /*    function.                                                          */
+  /*                                                                       */
+  LOCAL_FUNC
+  FT_Error  New_Tokenizer( FT_Stream      stream,
+                           T1_Tokenizer*  tokenizer )
+  {
+    FT_Memory     memory = stream->memory;
+    T1_Tokenizer  tokzer;
+    FT_Error      error;
+    FT_UShort     tag;
+    FT_ULong      size;
 
- LOCAL_FUNC
- FT_Error  New_Tokenizer( FT_Stream      stream,
-                          T1_Tokenizer*  tokenizer )
- {
-   FT_Memory     memory = stream->memory;
-   T1_Tokenizer  tokzer;
-   FT_Error      error;
-   FT_UShort     tag;
-   FT_ULong      size;
+    FT_Byte*      tok_base;
+    FT_ULong      tok_limit;
+    FT_ULong      tok_max;
 
-   FT_Byte*      tok_base;
-   FT_ULong      tok_limit;
-   FT_ULong      tok_max;
 
-   *tokenizer = 0;
+    *tokenizer = 0;
 
-   /* allocate object */
-   if ( FILE_Seek( 0L )                     ||
-        ALLOC( tokzer, sizeof(*tokzer) ) )
-     return error;
+    /* allocate object */
+    if ( FILE_Seek( 0L )                     ||
+         ALLOC( tokzer, sizeof ( *tokzer ) ) )
+      return error;
 
-   tokzer->stream = stream;
-   tokzer->memory = stream->memory;
+    tokzer->stream = stream;
+    tokzer->memory = stream->memory;
 
-   tokzer->in_pfb     = 0;
-   tokzer->in_private = 0;
+    tokzer->in_pfb     = 0;
+    tokzer->in_private = 0;
 
-   tok_base  = 0;
-   tok_limit = 0;
-   tok_max   = stream->size;
+    tok_base  = 0;
+    tok_limit = 0;
+    tok_max   = stream->size;
 
-   error = Read_PFB_Tag( stream, &tag, &size );
-   if (error) goto Fail;
+    error = Read_PFB_Tag( stream, &tag, &size );
+    if ( error )
+      goto Fail;
 
-   if ( tag != 0x8001 )
-   {
-     /* assume that it is a PFA file - an error will be produced later */
-     /* if a character with value > 127 is encountered..               */
+    if ( tag != 0x8001 )
+    {
+      /* assume that it is a PFA file -- an error will be produced later */
+      /* if a character with value > 127 is encountered                  */
 
-     /* rewind to start of file */
-     if ( FILE_Seek(0L) ) goto Fail;
+      /* rewind to start of file */
+      if ( FILE_Seek( 0L ) )
+        goto Fail;
 
-     size = stream->size;
-   }
-   else
-     tokzer->in_pfb = 1;
+      size = stream->size;
+    }
+    else
+      tokzer->in_pfb = 1;
 
-   /* if it's a memory-based resource, set up pointer */
-   if ( !stream->read )
-   {
-     tok_base  = (FT_Byte*)stream->base + stream->pos;
-     tok_limit = size;
-     tok_max   = size;
+    /* if it is a memory-based resource, set up pointer */
+    if ( !stream->read )
+    {
+      tok_base  = (FT_Byte*)stream->base + stream->pos;
+      tok_limit = size;
+      tok_max   = size;
 
-     /* check that the "size" field is valid */
-     if ( FILE_Skip(size) ) goto Fail;
-   }
-   else if ( tag == 0x8001 )
-   {
-     /* read segment in memory */
-     if ( ALLOC( tok_base, size ) )
-       goto Fail;
+      /* check that the `size' field is valid */
+      if ( FILE_Skip( size ) )
+        goto Fail;
+    }
+    else if ( tag == 0x8001 )
+    {
+      /* read segment in memory */
+      if ( ALLOC( tok_base, size ) )
+        goto Fail;
 
-     if ( FILE_Read( tok_base, size ) )
-     {
-       FREE( tok_base );
-       goto Fail;
-     }
+      if ( FILE_Read( tok_base, size ) )
+      {
+        FREE( tok_base );
+        goto Fail;
+      }
 
-     tok_limit = size;
-     tok_max   = size;
-   }
+      tok_limit = size;
+      tok_max   = size;
+    }
 
-   tokzer->base   = tok_base;
-   tokzer->limit  = tok_limit;
-   tokzer->max    = tok_max;
-   tokzer->cursor = 0;
+    tokzer->base   = tok_base;
+    tokzer->limit  = tok_limit;
+    tokzer->max    = tok_max;
+    tokzer->cursor = 0;
 
-   *tokenizer = tokzer;
+    *tokenizer = tokzer;
 
-   /* Now check font format, we must see a '%!PS-AdobeFont-1' */
-   /* or a '%!FontType'                                       */
-   {
-     if ( 16 > tokzer->limit )
-       grow( tokzer );
+    /* now check font format; we must see `%!PS-AdobeFont-1' */
+    /* or `%!FontType'                                       */
+    {
+      if ( 16 > tokzer->limit )
+        grow( tokzer );
 
-     if ( tokzer->limit <= 16 ||
-          ( strncmp( (const char*)tokzer->base, "%!PS-AdobeFont-1", 16 ) &&
-            strncmp( (const char*)tokzer->base, "%!FontType", 10 )       ) )
-     {
-       FT_TRACE2(( "Not a Type1 font\n" ));
-       error = FT_Err_Unknown_File_Format;
-       goto Fail;
-     }
-   }
-   return T1_Err_Ok;
+      if ( tokzer->limit <= 16 ||
+           ( strncmp( (const char*)tokzer->base, "%!PS-AdobeFont-1", 16 )   &&
+             strncmp( (const char*)tokzer->base, "%!FontType", 10 )       ) )
+      {
+        FT_TRACE2(( "[not a Type1 font]\n" ));
+        error = FT_Err_Unknown_File_Format;
+        goto Fail;
+      }
+    }
+    return T1_Err_Ok;
 
- Fail:
-   FREE( tokzer );
-   return error;
- }
+  Fail:
+    FREE( tokzer );
+    return error;
+  }
 
 
-
- /* return the value of an hexadecimal digit */
- static
- int  hexa_value( char c )
- {
+  /* return the value of an hexadecimal digit */
+  static
+  int  hexa_value( char  c )
+  {
    unsigned int  d;
 
-   d = (unsigned int)(c-'0');
-   if ( d <= 9 ) return (int)d;
 
-   d = (unsigned int)(c-'a');
-   if ( d <= 5 ) return (int)(d+10);
+    d = (unsigned int)( c - '0' );
+    if ( d <= 9 )
+      return (int)d;
 
-   d = (unsigned int)(c-'A');
-   if ( d <= 5 ) return (int)(d+10);
+    d = (unsigned int)( c - 'a' );
+    if ( d <= 5 )
+      return (int)( d + 10 );
 
-   return -1;
- }
+    d = (unsigned int)( c - 'A' );
+    if ( d <= 5 )
+      return (int)( d + 10 );
 
+    return -1;
+  }
 
- /*************************************************************************/
- /*                                                                       */
- /*  <Function> Done_Tokenizer                                            */
- /*                                                                       */
- /*  <Description>                                                        */
- /*     Closes a given tokenizer. This function will also close the       */
- /*     stream embedded in the object..                                   */
- /*                                                                       */
- /*  <Input>                                                              */
- /*     tokenizer :: target tokenizer object                              */
- /*                                                                       */
- /*  <Return>                                                             */
- /*     Type1 error code. 0 means success..                               */
- /*                                                                       */
 
- LOCAL_FUNC
- FT_Error  Done_Tokenizer( T1_Tokenizer  tokenizer )
- {
-   FT_Memory  memory = tokenizer->memory;
+  /*************************************************************************/
+  /*                                                                       */
+  /* <Function>                                                            */
+  /*    Done_Tokenizer                                                     */
+  /*                                                                       */
+  /* <Description>                                                         */
+  /*    Closes a given tokenizer.  This function will also close the       */
+  /*    stream embedded in the object.                                     */
+  /*                                                                       */
+  /* <Input>                                                               */
+  /*    tokenizer :: The target tokenizer object.                          */
+  /*                                                                       */
+  /* <Return>                                                              */
+  /*    FreeType error code.  0 means success.                             */
+  /*                                                                       */
+  LOCAL_FUNC
+  FT_Error  Done_Tokenizer( T1_Tokenizer  tokenizer )
+  {
+    FT_Memory  memory = tokenizer->memory;
 
-   /* clear read buffer if needed (disk-based resources) */
-   if ( tokenizer->in_private || !tokenizer->stream->base )
-     FREE( tokenizer->base );
 
-   FREE( tokenizer );
-   return T1_Err_Ok;
- }
+    /* clear read buffer if needed (disk-based resources) */
+    if ( tokenizer->in_private || !tokenizer->stream->base )
+      FREE( tokenizer->base );
 
+    FREE( tokenizer );
+    return T1_Err_Ok;
+  }
 
 
- /*************************************************************************/
- /*                                                                       */
- /*  <Function> Open_PrivateDict                                          */
- /*                                                                       */
- /*  <Description>                                                        */
- /*     This function must be called to set the tokenizer to the private  */
- /*     section of the Type1 file. It recognizes automatically the        */
- /*     the kind of eexec encryption used (ascii or binary)..             */
- /*                                                                       */
- /*  <Input>                                                              */
- /*     tokenizer :: target tokenizer object                              */
- /*     lenIV     :: value of the "lenIV" variable..                      */
- /*                                                                       */
- /*  <Return>                                                             */
- /*     Type1 error code. 0 means success..                               */
- /*                                                                       */
+  /*************************************************************************/
+  /*                                                                       */
+  /* <Function>                                                            */
+  /*    Open_PrivateDict                                                   */
+  /*                                                                       */
+  /* <Description>                                                         */
+  /*    This function must be called to set the tokenizer to the private   */
+  /*    section of the Type1 file.  It recognizes automatically the        */
+  /*    the kind of eexec encryption used (ascii or binary).               */
+  /*                                                                       */
+  /* <Input>                                                               */
+  /*    tokenizer :: The target tokenizer object.                          */
+  /*    lenIV     :: The value of the `lenIV' variable.                    */
+  /*                                                                       */
+  /* <Return>                                                              */
+  /*    FreeType error code.  0 means success.                             */
+  /*                                                                       */
+  LOCAL_FUNC
+  FT_Error  Open_PrivateDict( T1_Tokenizer  tokenizer )
+  {
+    T1_Tokenizer  tokzer = tokenizer;
+    FT_Stream     stream = tokzer->stream;
+    FT_Memory     memory = tokzer->memory;
+    FT_Error      error = 0;
 
- LOCAL_FUNC
- FT_Error  Open_PrivateDict( T1_Tokenizer  tokenizer )
- {
-   T1_Tokenizer  tokzer = tokenizer;
-   FT_Stream     stream = tokzer->stream;
-   FT_Memory     memory = tokzer->memory;
-   FT_Error      error = 0;
+    FT_UShort     tag;
+    FT_ULong      size;
 
-   FT_UShort     tag;
-   FT_ULong      size;
+    FT_Byte*      private;
 
-   FT_Byte*      private;
+    /* are we already in the private dictionary ? */
+    if ( tokzer->in_private )
+      return 0;
 
-   /* are we already in the private dictionary ? */
-   if ( tokzer->in_private )
-     return 0;
+    if ( tokzer->in_pfb )
+    {
+      /* in the case of the PFB format, the private dictionary can be */
+      /* made of several segments.  We thus first read the number of  */
+      /* segments to compute the total size of the private dictionary */
+      /* then re-read them into memory.                               */
+      FT_Long   start_pos    = FILE_Pos();
+      FT_ULong  private_size = 0;
 
-   if ( tokzer->in_pfb )
-   {
-     /* in the case of the PFB format, the private dictionary can be  */
-     /* made of several segments. We thus first read the number of    */
-     /* segments to compute the total size of the private dictionary  */
-     /* then re-read them into memory..                               */
-     FT_Long  start_pos    = FILE_Pos();
-     FT_ULong private_size = 0;
 
-     do
-     {
-       error = Read_PFB_Tag( stream, &tag, &size );
-       if (error || tag != 0x8002) break;
+      for (;;)
+      {
+        error = Read_PFB_Tag( stream, &tag, &size );
+        if ( error || tag != 0x8002 )
+          break;
 
-       private_size += size;
+        private_size += size;
 
-       if ( FILE_Skip(size) )
-         goto Fail;
-     }
-     while (1);
+        if ( FILE_Skip( size ) )
+          goto Fail;
+      }
 
-     /* Check that we have a private dictionary there */
-     /* and allocate private dictionary buffer        */
-     if ( private_size == 0 )
-     {
-       FT_ERROR(( "T1.Open_Private: invalid private dictionary section\n" ));
-       error = T1_Err_Invalid_File_Format;
-       goto Fail;
-     }
+      /* check that we have a private dictionary there */
+      /* and allocate private dictionary buffer        */
+      if ( private_size == 0 )
+      {
+        FT_ERROR(( "Open_PrivateDict:" ));
+        FT_ERROR(( " invalid private dictionary section\n" ));
+        error = T1_Err_Invalid_File_Format;
+        goto Fail;
+      }
 
-     if ( ALLOC( private, private_size ) )
-       goto Fail;
+      if ( ALLOC( private, private_size ) )
+        goto Fail;
 
-     /* read all sections into buffer */
-     if ( FILE_Seek( start_pos ) )
-       goto Fail_Private;
+      /* read all sections into buffer */
+      if ( FILE_Seek( start_pos ) )
+        goto Fail_Private;
 
-     private_size = 0;
-     do
-     {
-       error = Read_PFB_Tag( stream, &tag, &size );
-       if (error || tag != 0x8002) { error = 0; break; }
+      private_size = 0;
+      for (;;)
+      {
+        error = Read_PFB_Tag( stream, &tag, &size );
+        if ( error || tag != 0x8002 )
+        {
+          error = 0;
+          break;
+        }
 
-       if ( FILE_Read( private + private_size, size ) )
-         goto Fail_Private;
+        if ( FILE_Read( private + private_size, size ) )
+          goto Fail_Private;
 
-       private_size += size;
-     }
-     while (1);
+        private_size += size;
+      }
 
-     /* we must free the field "tokzer.base" if we're in a disk-based */
-     /* PFB file..                                                    */
-     if (stream->read)
-       FREE( tokzer->base );
+      /* we must free the field `tokzer.base' if we are in a disk-based */
+      /* PFB file.                                                      */
+      if ( stream->read )
+        FREE( tokzer->base );
 
-     tokzer->base   = private;
-     tokzer->cursor = 0;
-     tokzer->limit  = private_size;
-     tokzer->max    = private_size;
-   }
-   else
-   {
-     char*  base;
+      tokzer->base   = private;
+      tokzer->cursor = 0;
+      tokzer->limit  = private_size;
+      tokzer->max    = private_size;
+    }
+    else
+    {
+      char*  base;
 
-     /* we're in a PFA file, read each token until we find "eexec" */
-     while ( tokzer->token.kind2 != key_eexec )
-     {
-       error = Read_Token( tokzer );
-       if (error) goto Fail;
-     }
 
-     /* now determine wether the private dictionary is encoded in binary */
-     /* or hexadecimal ASCII format..                                    */
+      /* we are in a PFA file; read each token until we find `eexec' */
+      while ( tokzer->token.kind2 != key_eexec )
+      {
+        error = Read_Token( tokzer );
+        if ( error )
+          goto Fail;
+      }
 
-     /* we need to access the next 4 bytes (after the final \r following */
-     /* the 'eexec' keyword..) if they all are hexadecimal digits, then  */
-     /*we have a case of ASCII storage..                                 */
-     while ( tokzer->cursor+5 > tokzer->limit )
-     {
-       error = grow( tokzer );
-       if (error) goto Fail;
-     }
+      /* now determine whether the private dictionary is encoded in binary */
+      /* or hexadecimal ASCII format.                                      */
 
-     /* skip whitespace/line feed after "eexec" */
-     base = (char*)tokzer->base + tokzer->cursor + 1;
-     if ( ( hexa_value( base[0] ) | hexa_value( base[1] ) |
-            hexa_value( base[2] ) | hexa_value( base[3] ) ) < 0 )
-     {
-       /* binary encoding - "simply" read the stream */
+      /* we need to access the next 4 bytes (after the final \r following  */
+      /* the `eexec' keyword); if they all are hexadecimal digits, then    */
+      /* we have a case of ASCII storage.                                  */
+      while ( tokzer->cursor + 5 > tokzer->limit )
+      {
+        error = grow( tokzer );
+        if ( error )
+          goto Fail;
+      }
 
-       /* if it's a memory-based resource, we need to allocate a new */
-       /* storage buffer for the private dictionary, as it needs to  */
-       /* be decrypted later..                                       */
-       if ( stream->base )
-       {
-         size = stream->size - tokzer->cursor-1; /* remaining bytes */
+      /* skip whitespace/line feed after `eexec' */
+      base = (char*)tokzer->base + tokzer->cursor + 1;
+      if ( ( hexa_value( base[0] ) | hexa_value( base[1] ) |
+             hexa_value( base[2] ) | hexa_value( base[3] ) ) < 0 )
+      {
+        /* binary encoding -- `simply' read the stream */
 
-         if ( ALLOC( private, size ) )  /* allocate private dict buffer */
-           goto Fail;
+        /* if it is a memory-based resource, we need to allocate a new */
+        /* storage buffer for the private dictionary, as it must be    */
+        /* decrypted later                                             */
+        if ( stream->base )
+        {
+          size = stream->size - tokzer->cursor - 1; /* remaining bytes */
 
-         /* copy eexec-encrypted bytes */
-         MEM_Copy( private, tokzer->base + tokzer->cursor+1, size );
+          if ( ALLOC( private, size ) )  /* allocate private dict buffer */
+            goto Fail;
 
-         /* reset pointers - forget about file mapping */
-         tokzer->base   = private;
-         tokzer->limit  = size;
-         tokzer->max    = size;
-         tokzer->cursor = 0;
-       }
-       /* on the opposite, for disk based resources, we simply grow  */
-       /* the current buffer until its completion, and decrypt the   */
-       /* bytes within it. In all cases, the "base" buffer will be   */
-       /* discarded on DoneTokenizer if we're in the private dict..  */
-       else
-       {
-         /* grow the read buffer to the full file.. */
-         while ( tokzer->limit < tokzer->max )
-         {
-           error = grow( tokenizer );
-           if (error) goto Fail;
-         }
+          /* copy eexec-encrypted bytes */
+          MEM_Copy( private, tokzer->base + tokzer->cursor + 1, size );
 
-         /* set up cursor to first encrypted byte */
-         tokzer->cursor++;
-       }
-     }
-     else
-     {
-       /* ASCII hexadecimal encoding.. This sucks.. */
-       FT_Byte*  write;
-       FT_Byte*  cur;
-       FT_Byte*  limit;
-       FT_Int    count;
+          /* reset pointers - forget about file mapping */
+          tokzer->base   = private;
+          tokzer->limit  = size;
+          tokzer->max    = size;
+          tokzer->cursor = 0;
+        }
+        /* On the opposite, for disk based resources, we simply grow  */
+        /* the current buffer until its completion, and decrypt the   */
+        /* bytes within it.  In all cases, the `base' buffer will be  */
+        /* discarded on DoneTokenizer if we are in the private dict.  */
+        else
+        {
+          /* grow the read buffer to the full file */
+          while ( tokzer->limit < tokzer->max )
+          {
+            error = grow( tokenizer );
+            if ( error )
+              goto Fail;
+          }
 
-       /* Allocate a buffer, read each one byte at a time .. */
-       count = ( stream->size - tokzer->cursor );
-       size  = count/2;
+          /* set up cursor to first encrypted byte */
+          tokzer->cursor++;
+        }
+      }
+      else
+      {
+        /* ASCII hexadecimal encoding.  This sucks... */
+        FT_Byte*  write;
+        FT_Byte*  cur;
+        FT_Byte*  limit;
+        FT_Int    count;
 
-       if ( ALLOC( private, size ) )   /* allocate private dict buffer */
-         goto Fail;
 
-       write = private;
-       cur   = tokzer->base + tokzer->cursor;
-       limit = tokzer->base + tokzer->limit;
+        /* allocate a buffer, read one byte at a time */
+        count = stream->size - tokzer->cursor;
+        size  = count / 2;
 
-       /* read each bytes */
-       while ( count > 0 )
-       {
-         /* ensure that we can read the next 2 bytes !! */
-         while ( cur+2 > limit )
-         {
-           int  cursor = cur - tokzer->base;
-           error = grow( tokzer );
-           if (error) goto Fail_Private;
-           cur   = tokzer->base + cursor;
-           limit = tokzer->base + tokzer->limit;
-         }
+        if ( ALLOC( private, size ) )   /* allocate private dict buffer */
+          goto Fail;
 
-         /* check for new line */
-         if ( cur[0] == '\r' || cur[0] == '\n' )
-         {
-           cur++;
-           count--;
-         }
-         else
-         {
-           int  hex1 = hexa_value(cur[0]);
+        write = private;
+        cur   = tokzer->base + tokzer->cursor;
+        limit = tokzer->base + tokzer->limit;
 
-           /* exit if we have a non hexa-decimal digit which isn't */
-           /* a new-line character..                               */
-           if (hex1 < 0)
-             break;
+        /* read each byte */
+        while ( count > 0 )
+        {
+          /* ensure that we can read the next 2 bytes! */
+          while ( cur + 2 > limit )
+          {
+            int  cursor = cur - tokzer->base;
 
-           /* otherwise, store byte */
-           *write++ = ( hex1 << 4 ) | hexa_value( cur[1] );
-           cur += 2;
-           count -= 2;
-         }
-       }
 
-       /* get rid of old buffer in the case of disk-based resources */
-       if ( !stream->base )
-         FREE( tokzer->base );
+            error = grow( tokzer );
+            if ( error )
+              goto Fail_Private;
+            cur   = tokzer->base + cursor;
+            limit = tokzer->base + tokzer->limit;
+          }
 
-       /* set up pointers */
-       tokzer->base   = private;
-       tokzer->limit  = size;
-       tokzer->max    = size;
-       tokzer->cursor = 0;
-     }
-   }
+          /* check for new line */
+          if ( cur[0] == '\r' || cur[0] == '\n' )
+          {
+            cur++;
+            count--;
+          }
+          else
+          {
+            int  hex1 = hexa_value( cur[0] );
 
-   /* finally, decrypt the private dictionary - and skip the lenIV bytes */
-   t1_decrypt( tokzer->base, tokzer->limit, 55665 );
-   tokzer->cursor += 4;
 
- Fail:
-   return error;
+            /* exit if we have a non-hexadecimal digit which isn't */
+            /* a new-line character                                */
+            if ( hex1 < 0 )
+              break;
 
- Fail_Private:
-   FREE( private );
-   goto Fail;
- }
+            /* otherwise, store byte */
+            *write++ = ( hex1 << 4 ) | hexa_value( cur[1] );
+            cur   += 2;
+            count -= 2;
+          }
+        }
 
+        /* get rid of old buffer in the case of disk-based resources */
+        if ( !stream->base )
+          FREE( tokzer->base );
 
- /*************************************************************************/
- /*                                                                       */
- /*  <Function> Read_Token                                                */
- /*                                                                       */
- /*  <Description>                                                        */
- /*     Read a new token from the current input stream. This function     */
- /*     extracts a token from the font program until "Open_PrivateDict"   */
- /*     has been called. After this, it returns tokens from the           */
- /*     (eexec-encrypted) private dictionnary..                           */
- /*                                                                       */
- /*  <Input>                                                              */
- /*     tokenizer :: target tokenizer object                              */
- /*                                                                       */
- /*  <Return>                                                             */
- /*     Type1 error code. 0 means success..                               */
- /*                                                                       */
- /*  <Note>                                                               */
- /*     One should use the function Read_CharStrings to read the binary   */
- /*     charstrings from the private dict..                               */
- /*                                                                       */
- LOCAL_FUNC
- FT_Error  Read_Token( T1_Tokenizer  tokenizer )
- {
-   T1_Tokenizer tok = tokenizer;
-   FT_Long      cur, limit;
-   FT_Byte*     base;
-   char         c, starter, ender;
-   FT_Bool      token_started;
+        /* set up pointers */
+        tokzer->base   = private;
+        tokzer->limit  = size;
+        tokzer->max    = size;
+        tokzer->cursor = 0;
+      }
+    }
 
-   T1_TokenType  kind;
+    /* finally, decrypt the private dictionary - and skip the lenIV bytes */
+    t1_decrypt( tokzer->base, tokzer->limit, 55665 );
+    tokzer->cursor += 4;
 
-   tok->error      = T1_Err_Ok;
-   tok->token.kind = tok_any;
+  Fail:
+    return error;
 
-   base  = tok->base;
-   limit = tok->limit;
-   cur   = tok->cursor;
+  Fail_Private:
+    FREE( private );
+    goto Fail;
+  }
 
-   token_started = 0;
 
-   for (;;)
-   {
-     if ( cur >= limit )
-     {
-       if ( grow( tok ) ) goto Exit;
-       base  = tok->base;
-       limit = tok->limit;
-     }
+  /*************************************************************************/
+  /*                                                                       */
+  /* <Function>                                                            */
+  /*    Read_Token                                                         */
+  /*                                                                       */
+  /* <Description>                                                         */
+  /*    Reads a new token from the current input stream.  This function    */
+  /*    extracts a token from the font program until Open_PrivateDict()    */
+  /*    has been called.  After this, it returns tokens from the           */
+  /*    (eexec-encrypted) private dictionary.                              */
+  /*                                                                       */
+  /* <Input>                                                               */
+  /*    tokenizer :: The target tokenizer object.                          */
+  /*                                                                       */
+  /* <Return>                                                              */
+  /*    FreeType error code.  0 means success.                             */
+  /*                                                                       */
+  /* <Note>                                                                */
+  /*    Use the function Read_CharStrings() to read the binary charstrings */
+  /*    from the private dict.                                             */
+  /*                                                                       */
+  LOCAL_FUNC
+  FT_Error  Read_Token( T1_Tokenizer  tokenizer )
+  {
+    T1_Tokenizer  tok = tokenizer;
+    FT_Long       cur, limit;
+    FT_Byte*      base;
+    char          c, starter, ender;
+    FT_Bool       token_started;
 
-     c = (char)base[cur++];
+    T1_TokenType  kind;
 
-     /* check that we have an ASCII character */
-     if ( (FT_Byte)c > 127 )
-     {
-       FT_ERROR(( "Unexpected binary data in Type1 fragment !!\n" ));
-       tok->error = T1_Err_Invalid_File_Format;
-       goto Exit;
-     }
 
-     switch (c)
-     {
-       case '\r' :
-       case '\n' :
-       case ' '  :
-       case '\t' : /* skip initial whitespace => skip to next */
-         if (token_started)
-         {
-           /* possibly a name, keyword, wathever */
-           tok->token.kind = tok_any;
-           tok->token.len  = cur-tok->token.start-1;
-           goto Exit;
-         }
-         /* otherwise, skip everything */
-         break;
+    tok->error      = T1_Err_Ok;
+    tok->token.kind = tok_any;
 
+    base  = tok->base;
+    limit = tok->limit;
+    cur   = tok->cursor;
 
-       case '%' : /* this is a comment - skip everything */
-         for (;;)
-         {
-           FT_Int  left = limit - cur;
+    token_started = 0;
 
-           while (left > 0)
-           {
-             c = (char)base[cur++];
-             if ( c == '\r' || c == '\n' )
-               goto Next;
-             left--;
-           }
-           if ( grow( tokenizer ) ) goto Exit;
-           base  = tok->base;
-           limit = tok->limit;
-         }
+    for (;;)
+    {
+      if ( cur >= limit )
+      {
+        if ( grow( tok ) )
+          goto Exit;
+        base  = tok->base;
+        limit = tok->limit;
+      }
 
+      c = (char)base[cur++];
 
-       case '(' : /* a Postscript string */
-         kind  = tok_string;
-         ender = ')';
+      /* check that we have an ASCII character */
+      if ( (FT_Byte)c > 127 )
+      {
+        FT_ERROR(( "Read_Token:" ));
+        FT_ERROR(( " unexpected binary data in Type1 fragment!\n" ));
+        tok->error = T1_Err_Invalid_File_Format;
+        goto Exit;
+      }
 
-       L1:
-         if (!token_started)
-         {
-           token_started    = 1;
-           tok->token.start = cur-1;
-         }
+      switch ( c )
+      {
+      case '\r':
+      case '\n':
+      case ' ' :
+      case '\t':    /* skip initial whitespace => skip to next */
+        if ( token_started )
+        {
+          /* possibly a name, keyword, whatever */
+          tok->token.kind = tok_any;
+          tok->token.len  = cur - tok->token.start - 1;
+          goto Exit;
+        }
+        /* otherwise, skip everything */
+        break;
 
-         {
-           FT_Int  nest_level = 1;
+      case '%':     /* this is a comment -- skip everything */
+        for (;;)
+        {
+          FT_Int  left = limit - cur;
 
-           starter = c;
-           for (;;)
-           {
-             FT_Int  left = limit-cur;
-             while (left > 0)
-             {
-               c = (char)base[cur++];
 
-               if ( c == starter )
-                 nest_level++;
+          while ( left > 0 )
+          {
+            c = (char)base[cur++];
+            if ( c == '\r' || c == '\n' )
+              goto Next;
+            left--;
+          }
 
-               else if ( c == ender )
-               {
-                 nest_level--;
-                 if (nest_level <= 0)
-                 {
-                   tok->token.kind = kind;
-                   tok->token.len  = cur - tok->token.start;
-                   goto Exit;
-                 }
-               }
-               left--;
-             }
+          if ( grow( tokenizer ) )
+            goto Exit;
+          base  = tok->base;
+          limit = tok->limit;
+        }
 
-             if ( grow( tok ) ) goto Exit;
-             base  = tok->base;
-             limit = tok->limit;
-           }
-         }
+      case '(':     /* a Postscript string */
+        kind  = tok_string;
+        ender = ')';
 
+      L1:
+        if ( !token_started )
+        {
+          token_started    = 1;
+          tok->token.start = cur - 1;
+        }
 
-     case '[' : /* a Postscript array */
-       if (token_started)
-         goto Any_Token;
+        {
+          FT_Int  nest_level = 1;
 
-       kind  = tok_array;
-       ender = ']';
-       goto L1;
-       break;
 
+          starter = c;
+          for (;;)
+          {
+            FT_Int  left = limit - cur;
 
-     case '{' : /* a Postscript program */
-       if (token_started)
-         goto Any_Token;
 
-       kind  = tok_program;
-       ender = '}';
-       goto L1;
-       break;
+            while ( left > 0 )
+            {
+              c = (char)base[cur++];
 
+              if ( c == starter )
+                nest_level++;
 
-     case '<' : /* a Postscript hex byte array ?? */
-       if (token_started)
-         goto Any_Token;
+              else if ( c == ender )
+              {
+                nest_level--;
+                if ( nest_level <= 0 )
+                {
+                  tok->token.kind = kind;
+                  tok->token.len  = cur - tok->token.start;
+                  goto Exit;
+                }
+              }
+              left--;
+            }
 
-       kind  = tok_hexarray;
-       ender = '>';
-       goto L1;
-       break;
+            if ( grow( tok ) )
+              goto Exit;
+            base  = tok->base;
+            limit = tok->limit;
+          }
+        }
 
+      case '[':   /* a Postscript array */
+        if ( token_started )
+          goto Any_Token;
 
-     case '0':  /* any number */
-     case '1':
-     case '2':
-     case '3':
-     case '4':
-     case '5':
-     case '6':
-     case '7':
-     case '8':
-     case '9':
-       if (token_started)
-         goto Next;
+        kind  = tok_array;
+        ender = ']';
+        goto L1;
+        break;
 
-       tok->token.kind = tok_number;
-       token_started    = 1;
-       tok->token.start = cur-1;
-     L2:
-       for (;;)
-       {
-         FT_Int  left = limit-cur;
-         while (left > 0)
-         {
-           c = (char)base[cur++];
+      case '{':   /* a Postscript program */
+        if ( token_started )
+          goto Any_Token;
 
-           switch (c)
-           {
-             case '[':
-             case '{':
-             case '(':
-             case '<':
-             case '/':
-               goto Any_Token;
+        kind  = tok_program;
+        ender = '}';
+        goto L1;
+        break;
 
-             case  ' ':
-             case '\r':
-             case '\t':
-             case '\n':
-               tok->token.len = cur - tok->token.start - 1;
-               goto Exit;
+      case '<':   /* a Postscript hex byte array? */
+        if ( token_started )
+          goto Any_Token;
 
-             default:
-               ;
-           }
-           left--;
-         }
-         if (grow( tok )) goto Exit;
-         base  = tok->base;
-         limit = tok->limit;
-       }
+        kind  = tok_hexarray;
+        ender = '>';
+        goto L1;
+        break;
 
+      case '0':  /* any number */
+      case '1':
+      case '2':
+      case '3':
+      case '4':
+      case '5':
+      case '6':
+      case '7':
+      case '8':
+      case '9':
+        if ( token_started )
+          goto Next;
 
-     case '.':   /* maybe a number */
-     case '-':
-     case '+':
-       if (token_started)
-         goto Next;
+        tok->token.kind  = tok_number;
+        token_started    = 1;
+        tok->token.start = cur - 1;
 
-       token_started    = 1;
-       tok->token.start = cur-1;
-       for (;;)
-       {
-         FT_Int  left = limit-cur;
-         if ( left > 0 )
-         {
-           /* test for any following digit, interpreted as number */
-           c = (char)base[cur];
-           tok->token.kind = ( c >= '0' && c <= '9' ? tok_number : tok_any );
-           goto L2;
-         }
-         if (grow( tok )) goto Exit;
-         base  = tok->base;
-         limit = tok->limit;
-       }
+      L2:
+        for (;;)
+        {
+          FT_Int  left = limit - cur;
 
-     case '/':  /* maybe an immediate name */
-       if (!token_started)
-       {
-         token_started    = 1;
-         tok->token.start = cur-1;
 
-         for (;;)
-         {
-           FT_Int  left = limit-cur;
-           if ( left > 0 )
-           {
-             /* test for single '/', interpreted as garbage */
-             c = (char)base[cur];
-             tok->token.kind = ( c == ' '  || c == '\t' ||
-                                 c == '\r' || c == '\n' ?
-                                 tok_any : tok_immediate );
-             goto L2;
-           }
-           if (grow( tok )) goto Exit;
-           base  = tok->base;
-           limit = tok->limit;
-         }
-       }
-       else
-       {
-   Any_Token:        /* possibly a name or wathever */
-         cur--;
-         tok->token.len = cur - tok->token.start;
-         goto Exit;
-       }
+          while ( left > 0 )
+          {
+            c = (char)base[cur++];
 
-     default:
-       if (!token_started)
-       {
-         token_started    = 1;
-         tok->token.start = cur-1;
-       }
-     }
+            switch ( c )
+            {
+            case '[':                     /* ] */
+            case '{':                     /* } */
+            case '(':                     /* ) */
+            case '<':
+            case '/':
+              goto Any_Token;
 
- Next:
-   ;
-   }
+            case ' ':
+            case '\r':
+            case '\t':
+            case '\n':
+              tok->token.len = cur - tok->token.start - 1;
+              goto Exit;
 
- Exit:
-   tok->cursor = cur;
+            default:
+              ;
+            }
+            left--;
+          }
 
-   if (!tok->error)
-   {
-     /* now, tries to match keywords and immediate names */
-     FT_Int  index;
+          if ( grow( tok ) )
+            goto Exit;
+          base  = tok->base;
+          limit = tok->limit;
+        }
 
-     switch ( tok->token.kind )
-     {
-       case tok_immediate :  /* immediate name */
-         index = Find_Name( (char*)(tok->base + tok->token.start+1),
-                            tok->token.len-1,
-                            t1_immediates,
-                            imm_max - imm_first_ );
-         tok->token.kind2 = ( index >= 0 ? imm_first_ + index : 0 );
-         break;
+      case '.':   /* maybe a number */
+      case '-':
+      case '+':
+        if ( token_started )
+          goto Next;
 
+        token_started    = 1;
+        tok->token.start = cur - 1;
 
-       case tok_any : /* test for keyword */
-         index = Find_Name( (char*)(tok->base + tok->token.start),
-                	    tok->token.len,
-                            t1_keywords,
-                            key_max - key_first_ );
-         if ( index >= 0 )
-         {
-           tok->token.kind  = tok_keyword;
-           tok->token.kind2 = key_first_ + index;
-         }
-         else
-           tok->token.kind2 = 0;
-         break;
+        for (;;)
+        {
+          FT_Int  left = limit - cur;
 
-       default:
+
+          if ( left > 0 )
+          {
+            /* test for any following digit, interpreted as number */
+            c = (char)base[cur];
+            tok->token.kind = ( c >= '0' && c <= '9' ? tok_number : tok_any );
+            goto L2;
+          }
+
+          if ( grow( tok ) )
+            goto Exit;
+          base  = tok->base;
+          limit = tok->limit;
+        }
+
+      case '/':  /* maybe an immediate name */
+        if ( !token_started )
+        {
+          token_started    = 1;
+          tok->token.start = cur - 1;
+
+          for (;;)
+          {
+            FT_Int  left = limit - cur;
+
+
+            if ( left > 0 )
+            {
+              /* test for single '/', interpreted as garbage */
+              c = (char)base[cur];
+              tok->token.kind = ( c == ' '  || c == '\t' ||
+                                  c == '\r' || c == '\n' ) ? tok_any
+                                                           : tok_immediate;
+              goto L2;
+            }
+
+            if ( grow( tok ) )
+              goto Exit;
+            base  = tok->base;
+            limit = tok->limit;
+          }
+        }
+        else
+        {
+      Any_Token:        /* possibly a name or whatever */
+          cur--;
+          tok->token.len = cur - tok->token.start;
+          goto Exit;
+        }
+
+      default:
+        if ( !token_started )
+        {
+          token_started    = 1;
+          tok->token.start = cur - 1;
+        }
+      }
+
+    Next:
+      ;
+    }
+
+  Exit:
+    tok->cursor = cur;
+
+    if ( !tok->error )
+    {
+      /* now, try to match keywords and immediate names */
+      FT_Int  index;
+
+
+      switch ( tok->token.kind )
+      {
+      case tok_immediate:   /* immediate name */
+        index = Find_Name( (char*)( tok->base + tok->token.start + 1 ),
+                           tok->token.len - 1,
+                           t1_immediates,
+                           imm_max - imm_first_ );
+        tok->token.kind2 = ( index >= 0 ) ? imm_first_ + index : 0;
+        break;
+
+      case tok_any:         /* test for keyword */
+        index = Find_Name( (char*)( tok->base + tok->token.start ),
+                           tok->token.len,
+                           t1_keywords,
+                           key_max - key_first_ );
+        if ( index >= 0 )
+        {
+          tok->token.kind  = tok_keyword;
+          tok->token.kind2 = key_first_ + index;
+        }
+        else
+          tok->token.kind2 = 0;
+        break;
+
+      default:
          tok->token.kind2 = 0;
-     }
-   }
-   return tokenizer->error;
- }
+      }
+    }
+    return tokenizer->error;
+  }
 
 
 #if 0
- /*************************************************************************/
- /*                                                                       */
- /*  <Function> Read_CharStrings                                          */
- /*                                                                       */
- /*  <Description>                                                        */
- /*     Read a charstrings from the current input stream. These are       */
- /*     binary bytes that encode each individual glyph outline.           */
- /*                                                                       */
- /*     The caller is responsible for skipping the "lenIV" bytes at       */
- /*     the start of the record..                                         */
- /*                                                                       */
- /*  <Input>                                                              */
- /*     tokenizer :: target tokenizer object                              */
- /*     num_chars :: number of binary bytes to read                       */
- /*                                                                       */
- /*  <Output>                                                             */
- /*     buffer    :: target array of bytes. These are eexec-decrypted..   */
- /*                                                                       */
- /*  <Return>                                                             */
- /*     Type1 error code. 0 means success..                               */
- /*                                                                       */
- /*  <Note>                                                               */
- /*     One should use the function Read_CharStrings to read the binary   */
- /*     charstrings from the private dict..                               */
- /*                                                                       */
- LOCAL_FUNC
- FT_Error  Read_CharStrings( T1_Tokenizer  tokenizer,
-                             FT_Int        num_chars,
-                             FT_Byte*      buffer )
- {
-   for (;;)
-   {
-     FT_Int  left = tokenizer->limit - tokenizer->cursor;
 
-     if ( left >= num_chars )
-     {
-       MEM_Copy( buffer, tokenizer->base + tokenizer->cursor, num_chars );
-       t1_decrypt( buffer, num_chars, 4330 );
-       tokenizer->cursor += num_chars;
-       return T1_Err_Ok;
-     }
+  /*************************************************************************/
+  /*                                                                       */
+  /* <Function>                                                            */
+  /*    Read_CharStrings                                                   */
+  /*                                                                       */
+  /* <Description>                                                         */
+  /*    Reads a charstrings element from the current input stream.  These  */
+  /*    are binary bytes that encode each individual glyph outline.        */
+  /*                                                                       */
+  /*    The caller is responsible for skipping the `lenIV' bytes at the    */
+  /*    start of the record.                                               */
+  /*                                                                       */
+  /* <Input>                                                               */
+  /*    tokenizer :: The target tokenizer object.                          */
+  /*    num_chars :: The number of binary bytes to read.                   */
+  /*                                                                       */
+  /* <Output>                                                              */
+  /*    buffer    :: The target array of bytes.  These are                 */
+  /*    eexec-decrypted.                                                   */
+  /*                                                                       */
+  /* <Return>                                                              */
+  /*    FreeType error code.  0 means success.                             */
+  /*                                                                       */
+  /* <Note>                                                                */
+  /*    Use the function Read_CharStrings() to read binary charstrings     */
+  /*    from the private dict.                                             */
+  /*                                                                       */
+  LOCAL_FUNC
+  FT_Error  Read_CharStrings( T1_Tokenizer  tokenizer,
+                              FT_Int        num_chars,
+                              FT_Byte*      buffer )
+  {
+    for (;;)
+    {
+      FT_Int  left = tokenizer->limit - tokenizer->cursor;
 
-     if ( grow(tokenizer) ) return tokenizer->error;
-   }
- }
-#endif
+
+      if ( left >= num_chars )
+      {
+        MEM_Copy( buffer, tokenizer->base + tokenizer->cursor, num_chars );
+        t1_decrypt( buffer, num_chars, 4330 );
+        tokenizer->cursor += num_chars;
+        return T1_Err_Ok;
+      }
+
+      if ( grow( tokenizer ) )
+        return tokenizer->error;
+    }
+  }
+
+#endif /* 0 */
+
+
+/* END */
--- a/src/type1/t1tokens.h
+++ b/src/type1/t1tokens.h
@@ -1,43 +1,38 @@
-/*******************************************************************
- *
- *  t1tokens.h
- *
- *  Type 1 tokenizer
- *
- *  Copyright 1996 David Turner, Robert Wilhelm and Werner Lemberg.
- *
- *  This file is part of the FreeType project, and may only be used
- *  modified and distributed under the terms of the FreeType project
- *  license, LICENSE.TXT. By continuing to use, modify or distribute
- *  this file you indicate that you have read the license and
- *  understand and accept it fully.
- *
- *  The tokenizer is in charge of loading and reading a Type1 font
- *  file (either in PFB or PFA format), and extract successive tokens
- *  and keywords from its two streams (i.e. the font program, and the
- *  private dictionary).
- *
- *  Eexec decryption is performed automatically when entering the
- *  private dictionary, or when retrieving char strings..
- *
- ******************************************************************/
+/***************************************************************************/
+/*                                                                         */
+/*  t1tokens.h                                                             */
+/*                                                                         */
+/*    Type 1 tokenizer (specification).                                    */
+/*                                                                         */
+/*  Copyright 1996-2000 by                                                 */
+/*  David Turner, Robert Wilhelm, and Werner Lemberg.                      */
+/*                                                                         */
+/*  This file is part of the FreeType project, and may only be used,       */
+/*  modified, and distributed under the terms of the FreeType project      */
+/*  license, LICENSE.TXT.  By continuing to use, modify, or distribute     */
+/*  this file you indicate that you have read the license and              */
+/*  understand and accept it fully.                                        */
+/*                                                                         */
+/***************************************************************************/
 
+
 #ifndef T1TOKENS_H
 #define T1TOKENS_H
 
 #include <t1objs.h>
 
-/* enum value of first keyword */
-#define key_first_     100
+  /* enum value of first keyword */
+#define key_first_  100
 
-/* enum value of first immediate name */
-#define imm_first_     200
+  /* enum value of first immediate name */
+#define imm_first_  200
 
-  typedef  enum T1_TokenType_
+
+  typedef enum  T1_TokenType_
   {
     tok_error = 0,
 
-    tok_eof,             /* end of file              */
+    tok_eof,             /* end of file                  */
 
     /* simple token types */
 
@@ -50,14 +45,14 @@
     tok_hexarray,        /* array of hexadecimal nibbles */
     tok_any,             /* anything else                */
 
-    /* Postscript keywords - placed in lexicographical order */
+    /* Postscript keywords -- placed in lexicographical order */
 
-    key_RD_alternate = key_first_,      /* "-|" = alternate form of RD */
-	key_ExpertEncoding,
+    key_RD_alternate = key_first_,      /* `-|' = alternate form of RD */
+    key_ExpertEncoding,
     key_ND,
     key_NP,
     key_RD,
-	key_StandardEncoding,
+    key_StandardEncoding,
     key_array,
     key_begin,
     key_closefile,
@@ -77,16 +72,16 @@
     key_readonly,
     key_true,
     key_userdict,
-    key_NP_alternate,                   /* "|" = alternate form of NP  */
-    key_ND_alternate,                   /* "|-" = alternate form of ND */
+    key_NP_alternate,                   /* `|' = alternate form of NP  */
+    key_ND_alternate,                   /* `|-' = alternate form of ND */
 
     key_max,   /* always keep this value there */
 
-    /* Postscript immediate names - other names will be ignored, except */
-    /* in charstrings..                                                 */
+    /* Postscript immediate names -- other names will be ignored, except */
+    /* in charstrings                                                    */
 
-    imm_RD_alternate = imm_first_,      /* "-|" = alternate form of RD */
-    imm_notdef,                         /* "/.notdef" immediate        */
+    imm_RD_alternate = imm_first_,      /* `-|' = alternate form of RD */
+    imm_notdef,                         /* `/.notdef' immediate        */
     imm_BlendAxisTypes,
     imm_BlueFuzz,
     imm_BlueScale,
@@ -135,8 +130,8 @@
     imm_password,
     imm_version,
 
-    imm_NP_alternate,                   /* "|"  = alternate form of NP  */
-    imm_ND_alternate,                   /* "|-" = alternate form of ND  */
+    imm_NP_alternate,                   /* `|'  = alternate form of NP */
+    imm_ND_alternate,                   /* `|-' = alternate form of ND */
 
     imm_max   /* always keep this value here */
 
@@ -143,208 +138,95 @@
   } T1_TokenType;
 
 
-  /* these arrays are visible for debugging purposes.. */
-  extern const  char*  t1_keywords[];
-  extern const  char*  t1_immediates[];
+  /* these arrays are visible for debugging purposes */
+  extern const char*  t1_keywords[];
+  extern const char*  t1_immediates[];
 
 
- /*************************************************************************/
- /*                                                                       */
- /*  <Struct> T1_Token                                                    */
- /*                                                                       */
- /*  <Description>                                                        */
- /*     A structure used to describe a token in the current input         */
- /*     stream. Note that the Type1 driver doesn't try to interpret       */
- /*     tokens until it really needs to..                                 */
- /*                                                                       */
- /*  <Fields>                                                             */
- /*     kind  :: token type. Describes the token to the loader            */
- /*     kind2 :: detailed token type.                                     */
- /*                                                                       */
- /*     start ::  index of first character of token in input stream       */
- /*                                                                       */
- /*     len   ::  length of token in characters.                          */
- /*                                                                       */
-  typedef struct T1_Token_
+  /*************************************************************************/
+  /*                                                                       */
+  /* <Struct>                                                              */
+  /*    T1_Token                                                           */
+  /*                                                                       */
+  /* <Description>                                                         */
+  /*    A structure used to describe a token in the current input stream.  */
+  /*    Note that the Type1 driver doesn't try to interpret tokens until   */
+  /*    it really needs to.                                                */
+  /*                                                                       */
+  /* <Fields>                                                              */
+  /*    kind  :: The token type.  Describes the token to the loader.       */
+  /*                                                                       */
+  /*    kind2 :: Detailed token type.                                      */
+  /*                                                                       */
+  /*    start :: The index of the first character of token in the input    */
+  /*             stream.                                                   */
+  /*                                                                       */
+  /*    len   :: The length of the token in characters.                    */
+  /*                                                                       */
+  typedef struct  T1_Token_
   {
-    T1_TokenType   kind;     /* simple type                    */
-    T1_TokenType   kind2;    /* detailed type                  */
-    FT_Int         start;    /* index of first token character */
-    FT_Int         len;      /* length of token in chars       */
+    T1_TokenType  kind;     /* simple type                    */
+    T1_TokenType  kind2;    /* detailed type                  */
+    FT_Int        start;    /* index of first token character */
+    FT_Int        len;      /* length of token in chars       */
 
   } T1_Token;
 
 
-
-
-  typedef  struct  T1_TokenParser_
+  typedef struct  T1_TokenParser_
   {
-    FT_Memory   memory;
-    FT_Stream   stream;
+    FT_Memory  memory;
+    FT_Stream  stream;
 
-    FT_Bool     in_pfb;      /* true if PFB file, PFA otherwise */
-    FT_Bool     in_private;  /* true if in private dictionary   */
+    FT_Bool    in_pfb;      /* true if PFB file, PFA otherwise */
+    FT_Bool    in_private;  /* true if in private dictionary   */
 
-    FT_Byte*    base;        /* base address of current read buffer */
-    FT_Long     cursor;      /* current position in read buffer     */
-    FT_Long     limit;       /* limit of current read buffer        */
-    FT_Long     max;         /* maximum size of read buffer         */
+    FT_Byte*   base;        /* base address of current read buffer */
+    FT_Long    cursor;      /* current position in read buffer     */
+    FT_Long    limit;       /* limit of current read buffer        */
+    FT_Long    max;         /* maximum size of read buffer         */
 
-    FT_Error    error;       /* last error                          */
-    T1_Token    token;       /* last token read                     */
+    FT_Error   error;       /* last error                          */
+    T1_Token   token;       /* last token read                     */
 
   } T1_TokenParser;
 
 
+  /*************************************************************************/
+  /*                                                                       */
+  /* <Type>                                                                */
+  /*    T1_Tokenizer                                                       */
+  /*                                                                       */
+  /* <Description>                                                         */
+  /*    A handle to an object used to extract tokens from the input.  The  */
+  /*    object is able to perform PFA/PFB recognition, eexec decryption of */
+  /*    the private dictionary, as well as eexec decryption of the         */
+  /*    charstrings.                                                       */
+  /*                                                                       */
+  typedef T1_TokenParser*  T1_Tokenizer;
 
- /*************************************************************************/
- /*                                                                       */
- /*  <Type> T1_Tokenizer                                                  */
- /*                                                                       */
- /*  <Description>                                                        */
- /*     A handle to an object used to extract tokens from the input.      */
- /*     The object is able to perform PFA/PFB recognition, eexec          */
- /*     decryption of the private dictionary, as well as eexec decryption */
- /*     of the charstrings..                                              */
- /*                                                                       */
-  typedef  T1_TokenParser*    T1_Tokenizer;
 
+  LOCAL_DEF
+  FT_Error  New_Tokenizer( FT_Stream      stream,
+                           T1_Tokenizer*  tokenizer );
 
- /*************************************************************************/
- /*                                                                       */
- /*  <Function> New_Tokenizer                                             */
- /*                                                                       */
- /*  <Description>                                                        */
- /*     Creates a new tokenizer from a given input stream. This function  */
- /*     automatically recognizes "pfa" or "pfb" files. The function       */
- /*     "Read_Token" can then be used to extract successive tokens from   */
- /*     the stream..                                                      */
- /*                                                                       */
- /*  <Input>                                                              */
- /*     stream  :: input stream                                           */
- /*                                                                       */
- /*  <Output>                                                             */
- /*     tokenizer :: handle to new tokenizer object..                     */
- /*                                                                       */
- /*  <Return>                                                             */
- /*     Type1 error code. 0 means success..                               */
- /*                                                                       */
- /*  <Note>                                                               */
- /*     This function copies the stream handle within the object. Callers */
- /*     should not discard "stream". This is done by the Done_Tokenizer   */
- /*     function..                                                        */
- /*                                                                       */
- LOCAL_DEF
- FT_Error  New_Tokenizer( FT_Stream      stream,
-                          T1_Tokenizer*  tokenizer );
+  LOCAL_DEF
+  FT_Error  Done_Tokenizer( T1_Tokenizer  tokenizer );
 
+  LOCAL_DEF
+  FT_Error  Open_PrivateDict( T1_Tokenizer  tokenizer );
 
+  LOCAL_DEF
+  FT_Error  Read_Token( T1_Tokenizer  tokenizer );
 
- /*************************************************************************/
- /*                                                                       */
- /*  <Function> Done_Tokenizer                                            */
- /*                                                                       */
- /*  <Description>                                                        */
- /*     Closes a given tokenizer. This function will also close the       */
- /*     stream embedded in the object..                                   */
- /*                                                                       */
- /*  <Input>                                                              */
- /*     tokenizer :: target tokenizer object                              */
- /*                                                                       */
- /*  <Return>                                                             */
- /*     Type1 error code. 0 means success..                               */
- /*                                                                       */
- LOCAL_DEF
- FT_Error  Done_Tokenizer( T1_Tokenizer  tokenizer );
 
-
-
- /*************************************************************************/
- /*                                                                       */
- /*  <Function> Open_PrivateDict                                          */
- /*                                                                       */
- /*  <Description>                                                        */
- /*     This function must be called to set the tokenizer to the private  */
- /*     section of the Type1 file. It recognizes automatically the        */
- /*     the kind of eexec encryption used (ascii or binary)..             */
- /*                                                                       */
- /*  <Input>                                                              */
- /*     tokenizer :: target tokenizer object                              */
- /*     lenIV     :: value of the "lenIV" variable..                      */
- /*                                                                       */
- /*  <Return>                                                             */
- /*     Type1 error code. 0 means success..                               */
- /*                                                                       */
- LOCAL_DEF
- FT_Error  Open_PrivateDict( T1_Tokenizer  tokenizer );
-
-
-
- /*************************************************************************/
- /*                                                                       */
- /*  <Function> Read_Token                                                */
- /*                                                                       */
- /*  <Description>                                                        */
- /*     Read a new token from the current input stream. This function     */
- /*     extracts a token from the font program until "Open_PrivateDict"   */
- /*     has been called. After this, it returns tokens from the           */
- /*     (eexec-encrypted) private dictionnary..                           */
- /*                                                                       */
- /*  <Input>                                                              */
- /*     tokenizer :: target tokenizer object                              */
- /*                                                                       */
- /*  <Return>                                                             */
- /*     Type1 error code. 0 means success..                               */
- /*                                                                       */
- /*  <Note>                                                               */
- /*     One should use the function Read_CharStrings to read the binary   */
- /*     charstrings from the private dict..                               */
- /*                                                                       */
- LOCAL_DEF
- FT_Error  Read_Token( T1_Tokenizer  tokenizer );
-
-
 #if 0
- /*************************************************************************/
- /*                                                                       */
- /*  <Function> Read_CharStrings                                          */
- /*                                                                       */
- /*  <Description>                                                        */
- /*     Read a charstrings from the current input stream. These are       */
- /*     binary bytes that encode each individual glyph outline.           */
- /*                                                                       */
- /*  <Input>                                                              */
- /*     tokenizer :: target tokenizer object                              */
- /*     num_chars :: number of binary bytes to read                       */
- /*                                                                       */
- /*  <Output>                                                             */
- /*     buffer    :: target array of bytes. These are eexec-decrypted..   */
- /*                                                                       */
- /*  <Return>                                                             */
- /*     Type1 error code. 0 means success..                               */
- /*                                                                       */
- /*  <Note>                                                               */
- /*     One should use the function Read_CharStrings to read the binary   */
- /*     charstrings from the private dict..                               */
- /*                                                                       */
- LOCAL_DEF
- FT_Error  Read_CharStrings( T1_Tokenizer  tokenizer,
-                             FT_Int        num_chars,
-                             FT_Byte*      buffer );
-#endif
+  LOCAL_DEF
+  FT_Error  Read_CharStrings( T1_Tokenizer  tokenizer,
+                              FT_Int        num_chars,
+                              FT_Byte*      buffer );
+#endif /* 0 */
 
- /*************************************************************************/
- /*                                                                       */
- /*  <Function> t1_decrypt                                                */
- /*                                                                       */
- /*  <Description>                                                        */
- /*     Performs the Type 1 charstring decryption process..               */
- /*                                                                       */
- /*  <Input>                                                              */
- /*     buffer  :: base address of data to decrypt                        */
- /*     length  :: number of bytes to decrypt from base address           */
- /*     seed    :: ecnryption seed (4330 for charstrings).                */
- /*                                                                       */
   LOCAL_DEF
   void  t1_decrypt( FT_Byte*   buffer,
                     FT_Int     length,
@@ -351,3 +233,6 @@
                     FT_UShort  seed );
 
 #endif /* T1TOKENS_H */
+
+
+/* END */
--- a/src/type1/type1.c
+++ b/src/type1/type1.c
@@ -2,29 +2,17 @@
 /*                                                                         */
 /*  type1.c                                                                */
 /*                                                                         */
-/*  FreeType Type 1 driver component                                       */
+/*    FreeType Type 1 driver component (body only).                        */
 /*                                                                         */
-/*  Copyright 1996-1998 by                                                 */
+/*  Copyright 1996-2000 by                                                 */
 /*  David Turner, Robert Wilhelm, and Werner Lemberg.                      */
 /*                                                                         */
-/*  This file is part of the FreeType project, and may only be used        */
-/*  modified and distributed under the terms of the FreeType project       */
+/*  This file is part of the FreeType project, and may only be used,       */
+/*  modified, and distributed under the terms of the FreeType project      */
 /*  license, LICENSE.TXT.  By continuing to use, modify, or distribute     */
 /*  this file you indicate that you have read the license and              */
 /*  understand and accept it fully.                                        */
 /*                                                                         */
-/*                                                                         */
-/*  This file is used to compile the FreeType Type 1  font driver.         */
-/*  It relies on all components included in the "base" layer (see          */
-/*  the file "ftbase.c"). Source code is located in "freetype/ttlib"       */
-/*  and contains :                                                         */
-/*                                                                         */
-/*     - a driver interface                                                */
-/*     - an object manager                                                 */
-/*     - a table loader                                                    */
-/*     - a glyph loader                                                    */
-/*     - a glyph hinter                                                    */
-/*                                                                         */
 /***************************************************************************/
 
 
@@ -32,7 +20,7 @@
 
 #include <t1driver.c>
 #include <t1objs.c>
-#include <t1load.c>     /* table loader      */
+#include <t1load.c>
 #include <t1gload.c>
 #include <t1tokens.c>
 #include <t1parse.c>
@@ -44,3 +32,6 @@
 #ifndef T1_CONFIG_OPTION_NO_AFM
 #include <t1afm.c>
 #endif
+
+
+/* END */