ref: 6cda9c489d2cc4841932551363c87fbc647d5357
parent: 271b1e163310ba1df4df1cf30583eb7c8740482f
author: Werner Lemberg <[email protected]>
date: Thu Jan 22 04:07:12 EST 2004
* include/freetype/ftcache.h: Delete duplicated definition of
FTC_FaceID.

* src/cff/cffdrivr.c (cff_get_cmap_info): Call the sfnt module's TT
CMap Info service function if the cmap comes from sfnt.  Return 0 if
the cmap is synthesized in the cff module.

Formatting; updating copyright.
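
For orientation, the cffdrivr.c change follows FreeType's generic
module-service lookup pattern: cmaps synthesized by the cff module
itself carry no TT language information, while cmaps parsed from an
embedded sfnt table forward the query to the sfnt module's TT CMAP
INFO service.  A condensed sketch of the new function (names exactly
as in the diff below; only the layout is compacted):

    static FT_Error
    cff_get_cmap_info( FT_CharMap    charmap,
                       TT_CMapInfo  *cmap_info )
    {
      FT_CMap   cmap  = FT_CMAP( charmap );
      FT_Error  error = CFF_Err_Ok;


      cmap_info->language = 0;

      /* cmaps synthesized by the cff module: report language 0 */
      if ( cmap->clazz != &cff_cmap_encoding_class_rec &&
           cmap->clazz != &cff_cmap_unicode_class_rec  )
      {
        /* the cmap comes from the sfnt table: ask the sfnt module */
        FT_Face             face    = FT_CMAP_FACE( cmap );
        FT_Library          library = FT_FACE_LIBRARY( face );
        FT_Module           sfnt    = FT_Get_Module( library, "sfnt" );
        FT_Service_TTCMaps  service =
          ft_module_get_service( sfnt, FT_SERVICE_ID_TT_CMAP );


        if ( service && service->get_cmap_info )
          error = service->get_cmap_info( charmap, cmap_info );
      }

      return error;
    }
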
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,17 @@
+2004-01-20 Masatake YAMATO <[email protected]>
+
+ * include/freetype/ftcache.h: Delete duplicated definition of
+ FTC_FaceID.
+
+ * src/cff/cffdrivr.c (cff_get_cmap_info): Call sfnt module's TT CMap
+ Info service function if the cmap comes from sfnt. Return 0 if the
+ cmap is synthesized in cff module.
+
+2004-01-20 David Turner <[email protected]>
+
+ * src/cache/ftcmanag.c (ftc_size_node_compare): Call
+ FT_Activate_Size.
+
2004-01-20 Werner Lemberg <[email protected]>
* src/type1/t1parse.c (T1_Get_Private_Dict): Skip exactly one
@@ -5,36 +19,44 @@
2004-01-18 David Turner <[email protected]>
- * src/sfnt/ttsbit.c: removed compiler warning
+ * src/sfnt/ttsbit.c (tt_face_set_sbit_strike): Remove compiler
+ warning.
- * src/tools/docmaker/*: updating beautifier tool
+ * src/tools/docmaker/*: Updating beautifier tool.
2004-01-15 David Turner <[email protected]>
- * src/base/ftoutln.c (ft_orientation_extremum_compute): fixing
- infinite loop bug !
+ * src/base/ftoutln.c (ft_orientation_extremum_compute): Fix
+ infinite loop bug.
- * src/base/ftstroke.c, include/freetype/ftstroke.h: fixing bugs and
- adding FT_Glyph_Stroke and FT_Glyph_StrokerBorder APIs
+ * include/freetype/ftstroke.h: Include FT_GLYPH_H.
+ (FT_Stroker_Rewind, FT_Glyph_Stroke, FT_Glyph_StrokeBorder): New
+ declarations.
- * include/freetype/ftcache.h, include/freetype/cache/ftcmanag.h:
- adding FTC_Manager_LookupSize and FTC_Scaler to the public API
- (they were previously hidden)
+ * src/base/ftstroke.c: Include FT_INTERNAL_OBJECTS_H.
+ (FT_Outline_GetOutsideBorder): Inverse result.
+ (FT_Stroker_Rewind, FT_Glyph_Stroke, FT_Glyph_StrokeBorder): New
+ functions.
+ (FT_Stroker_EndSubPath): Close path if needed.
+ (FT_Stroker_Set, FT_Stroker_ParseOutline): Use FT_Stroker_Rewind.
- * src/tools/docmaker/*: updating the DocMaker tool, adding a new
- tool named "docbeauty" to beautify the documentation comments
- (e.g. convert them to a single block border mode)
+ * include/freetype/cache/ftcmanag.h (FTC_ScalerRec,
+ FTC_Manager_LookupSize): Moved to...
+ * include/freetype/ftcache.h (FTC_ScalerRec,
+ FTC_Manager_LookupSize): Here.
+ * src/tools/docmaker/docbeauty.py: New file to beautify the
+ documentation comments (e.g., to convert them to single block border
+ mode).
+ * src/tools/docmaker/docmaker.py (file_exists, make_file_list):
+ Moved to...
+ * src/tools/docmaker/utils.py (file_exists, make_file_list): Here.
+
2004-01-14 David Turner <[email protected]>
- * include/freetype/internal/ftmemory.h,
- src/autohint/ahhint.c, src/base/ftgloadr.c,
- src/base/ftglyph.c, src/base/ftoutln.c,
- src/base/ftstroke.c, src/cff/cffload.c, src/truetype/ttgload.c,
- src/truetype/ttinterp.c:
-
- introducing the new FT_ARRAY_COPY and FT_ARRAY_MOVE macros
- to make copying arrays easier
+ * include/freetype/internal/ftmemory.h (FT_ARRAY_COPY,
+ FT_ARRAY_MOVE): New macros to make copying arrays easier.
+ Updated all relevant code to use them.
2004-01-14 Werner Lemberg <[email protected]>
--- a/include/freetype/cache/ftcmanag.h
+++ b/include/freetype/cache/ftcmanag.h
@@ -4,7 +4,7 @@
/* */
/* FreeType Cache Manager (specification). */
/* */
-/* Copyright 2000-2001, 2003 by */
+/* Copyright 2000-2001, 2003, 2004 by */
/* David Turner, Robert Wilhelm, and Werner Lemberg. */
/* */
/* This file is part of the FreeType project, and may only be used, */
--- a/include/freetype/ftcache.h
+++ b/include/freetype/ftcache.h
@@ -4,7 +4,7 @@
/* */
/* FreeType Cache subsystem (specification). */
/* */
-/* Copyright 1996-2001, 2002, 2003 by */
+/* Copyright 1996-2001, 2002, 2003, 2004 by */
/* David Turner, Robert Wilhelm, and Werner Lemberg. */
/* */
/* This file is part of the FreeType project, and may only be used, */
@@ -356,40 +356,30 @@
/*************************************************************************/
/* */
- /* <Type> */
- /* FTC_Scaler */
- /* */
- /* <Description> */
- /* Handle to a @FTC_ScalerRec structure. */
- /* */
- typedef struct FTC_FaceIDRec_* FTC_FaceID;
-
-
- /*************************************************************************/
- /* */
/* <Struct> */
/* FTC_ScalerRec */
/* */
/* <Description> */
/* A structure used to describe a given character size in either */
- /* pixels or points to the cache manager. See @FTC_Manager_LookupSize */
+ /* pixels or points to the cache manager. See */
+ /* @FTC_Manager_LookupSize. */
/* */
/* <Fields> */
- /* face_id :: source face id */
+ /* face_id :: The source face ID. */
/* */
- /* width :: character width */
+ /* width :: The character width. */
/* */
- /* height :: character height */
+ /* height :: The character height. */
/* */
- /* pixel :: booelan. If TRUE, the "width" and "height" fields */
+ /* pixel :: A Boolean. If TRUE, the `width' and `height' fields */
/* are interpreted as integer pixel character sizes. */
- /* If false, they are expressed as 1/64th of points */
+ /* Otherwise, they are expressed as 1/64th of points. */
/* */
- /* x_res :: only used when 'pixel' is FALSE. indicates the */
- /* horizontal resolution in dpis */
+ /* x_res :: Only used when `pixel' is FALSE to indicate the */
+ /* horizontal resolution in dpi. */
/* */
- /* y_res :: only used when 'pixel' is FALSE. indicates the */
- /* vertical resolution in dpis */
+ /* y_res :: Only used when `pixel' is FALSE to indicate the */
+ /* vertical resolution in dpi. */
/* */
/* <Note> */
/* This type is mainly used to retrieve @FT_Size objects through the */
@@ -407,7 +397,6 @@
} FTC_ScalerRec, *FTC_Scaler;
-
/*************************************************************************/
/* */
/* <Function> */
@@ -414,13 +403,13 @@
/* FTC_Manager_LookupSize */
/* */
/* <Description> */
- /* Retrieves the @FT_Size object that corresponds to a given */
+ /* Retrieve the @FT_Size object that corresponds to a given */
/* @FTC_Scaler through a cache manager. */
/* */
/* <Input> */
/* manager :: A handle to the cache manager. */
/* */
- /* scaler :: scaler handle. */
+ /* scaler :: A scaler handle. */
/* */
/* <Output> */
/* asize :: A handle to the size object. */
@@ -430,10 +419,10 @@
/* */
/* <Note> */
/* The returned @FT_Size object is always owned by the manager. You */
- /* should never try to discard it yourself. */
+ /* should never try to discard it by yourself. */
/* */
- /* You can access the parent @FT_Face object simply as "size->face" */
- /* if you need it. Note that this object is also owner by the */
+ /* You can access the parent @FT_Face object simply as `size->face' */
+ /* if you need it. Note that this object is also owned by the */
/* manager. */
/* */
FT_EXPORT( FT_Error )
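
A hedged usage sketch of the now-public scaler API (`manager' is an
existing FTC_Manager and `my_face_id' a hypothetical
application-defined face ID):

    FTC_ScalerRec  scaler;
    FT_Size        size;
    FT_Error       error;


    scaler.face_id = my_face_id;
    scaler.width   = 16;          /* interpreted as pixel sizes...  */
    scaler.height  = 16;
    scaler.pixel   = 1;           /* ...because `pixel' is TRUE     */
    scaler.x_res   = 0;           /* resolutions are only used when */
    scaler.y_res   = 0;           /* `pixel' is FALSE               */

    error = FTC_Manager_LookupSize( manager, &scaler, &size );

    /* `size' and `size->face' are owned by the manager; */
    /* never discard them by yourself                    */
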
--- a/include/freetype/ftstroke.h
+++ b/include/freetype/ftstroke.h
@@ -4,7 +4,7 @@
/* */
/* FreeType path stroker (specification). */
/* */
-/* Copyright 2002, 2003 by */
+/* Copyright 2002, 2003, 2004 by */
/* David Turner, Robert Wilhelm, and Werner Lemberg. */
/* */
/* This file is part of the FreeType project, and may only be used, */
@@ -242,8 +242,9 @@
*
* @description:
* Reset a stroker object without changing its attributes.
- * you should call this function before beginning a new
- * series of calls to @FT_Stroker_BeginSubPath/@FT_Stroker_EndSubPath
+ * You should call this function before beginning a new
+ * series of calls to @FT_Stroker_BeginSubPath or
+ * @FT_Stroker_EndSubPath.
*
* @input:
* stroker ::
@@ -284,7 +285,7 @@
* If `opened' is 1, the outline is processed as an open path, and the
* stroker will generate a single `stroke' outline.
*
- * this function calls @FT_Stroker_Rewind automatically
+ * This function calls @FT_Stroker_Rewind automatically.
*/
FT_EXPORT( FT_Error )
FT_Stroker_ParseOutline( FT_Stroker stroker,
@@ -612,10 +613,10 @@
* FT_Glyph_Stroke
*
* @description:
- * stroke a given outline glyph object with a given stroker
+ * Stroke a given outline glyph object with a given stroker.
*
* @inout:
- * pglyph :: source glyph handle on input, new glyph handle
+ * pglyph :: Source glyph handle on input, new glyph handle
* on output.
*
* @input:
@@ -622,14 +623,15 @@
* stroker ::
* A stroker handle.
*
- * destroy :: boolean. If TRUE, the source glyph object is destroyed
- * on success
+ * destroy ::
+ * A Boolean. If TRUE, the source glyph object is destroyed
+ * on success.
*
* @return:
- * FreeType error code. 0 means success
+ * FreeType error code. 0 means success.
*
* @note:
- * the source glyph is untouched in case of error.
+ * The source glyph is untouched in case of error.
*/
FT_EXPORT( FT_Error )
FT_Glyph_Stroke( FT_Glyph *pglyph,
@@ -643,28 +645,30 @@
* FT_Glyph_StrokeBorder
*
* @description:
- * stroke a given outline glyph object with a given stroker, but
- * only returns either its inside or outside border
+ * Stroke a given outline glyph object with a given stroker, but
+ * only return either its inside or outside border.
*
* @inout:
- * pglyph :: source glyph handle on input, new glyph handle
- * on output.
+ * pglyph ::
+ * Source glyph handle on input, new glyph handle on output.
*
* @input:
* stroker ::
* A stroker handle.
*
- * inside :: boolean. If TRUE, return the inside border; otherwise,
- * the outside border
+ * inside ::
+ * A Boolean. If TRUE, return the inside border, otherwise
+ * the outside border.
*
- * destroy :: boolean. If TRUE, the source glyph object is destroyed
- * on success
+ * destroy ::
+ * A Boolean. If TRUE, the source glyph object is destroyed
+ * on success.
*
* @return:
- * FreeType error code. 0 means success
+ * FreeType error code. 0 means success.
*
* @note:
- * the source glyph is untouched in case of error.
+ * The source glyph is untouched in case of error.
*/
FT_EXPORT( FT_Error )
FT_Glyph_StrokeBorder( FT_Glyph *pglyph,
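
A hedged usage sketch of the new glyph-stroking calls (hypothetical
helper; the stroker is assumed to have been created and configured
beforehand, e.g. with FT_Stroker_Set):

    /* replace `*pglyph' by its stroked version, or by the outside  */
    /* border of the stroke only; the source glyph is destroyed on  */
    /* success (`destroy' is TRUE) and left untouched on error      */
    static FT_Error
    stroke_glyph( FT_Glyph   *pglyph,
                  FT_Stroker  stroker,
                  FT_Bool     border_only )
    {
      if ( border_only )
        return FT_Glyph_StrokeBorder( pglyph, stroker, 0, 1 );

      return FT_Glyph_Stroke( pglyph, stroker, 1 );
    }
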
--- a/include/freetype/internal/ftmemory.h
+++ b/include/freetype/internal/ftmemory.h
@@ -4,7 +4,7 @@
/* */
/* The FreeType memory management macros (specification). */
/* */
-/* Copyright 1996-2001, 2002 by */
+/* Copyright 1996-2001, 2002, 2004 by */
/* David Turner, Robert Wilhelm, and Werner Lemberg */
/* */
/* This file is part of the FreeType project, and may only be used, */
@@ -180,11 +180,12 @@
#define FT_ZERO( p ) FT_MEM_ZERO( p, sizeof ( *(p) ) )
-#define FT_ARRAY_COPY( dest, source, count ) \
- FT_MEM_COPY( dest, source, (count)*sizeof(*(dest)) )
+#define FT_ARRAY_COPY( dest, source, count ) \
+ FT_MEM_COPY( dest, source, (count) * sizeof( *(dest) ) )
-#define FT_ARRAY_MOVE( dest, source, count ) \
- FT_MEM_MOVE( dest, source, (count)*sizeof(*(dest)) )
+#define FT_ARRAY_MOVE( dest, source, count ) \
+ FT_MEM_MOVE( dest, source, (count) * sizeof( *(dest) ) )
+
/*************************************************************************/
/* */
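
Since the reworked macros compute the byte count from the
destination's element type, call sites need no explicit sizeof.  A
minimal hypothetical example:

    FT_Vector  source[16];
    FT_Vector  dest[16];


    /* copies 16 * sizeof ( FT_Vector ) bytes, like memcpy */
    FT_ARRAY_COPY( dest, source, 16 );

    /* the memmove analogue, safe for overlapping ranges: */
    /* shift the first 15 elements down by one slot       */
    FT_ARRAY_MOVE( dest, dest + 1, 15 );
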
--- a/src/autohint/ahhint.c
+++ b/src/autohint/ahhint.c
@@ -4,7 +4,7 @@
/* */
/* Glyph hinter (body). */
/* */
-/* Copyright 2000-2001, 2002, 2003 Catharon Productions Inc. */
+/* Copyright 2000-2001, 2002, 2003, 2004 Catharon Productions Inc. */
/* Author: David Turner */
/* */
/* This file is part of the Catharon Typography Project and shall only */
--- a/src/base/ftgloadr.c
+++ b/src/base/ftgloadr.c
@@ -4,7 +4,7 @@
/* */
/* The FreeType glyph loader (body). */
/* */
-/* Copyright 2002, 2003 by */
+/* Copyright 2002, 2003, 2004 by */
/* David Turner, Robert Wilhelm, and Werner Lemberg */
/* */
/* This file is part of the FreeType project, and may only be used, */
--- a/src/base/ftglyph.c
+++ b/src/base/ftglyph.c
@@ -4,7 +4,7 @@
/* */
/* FreeType convenience functions to handle glyphs (body). */
/* */
-/* Copyright 1996-2001, 2002, 2003 by */
+/* Copyright 1996-2001, 2002, 2003, 2004 by */
/* David Turner, Robert Wilhelm, and Werner Lemberg. */
/* */
/* This file is part of the FreeType project, and may only be used, */
--- a/src/base/ftoutln.c
+++ b/src/base/ftoutln.c
@@ -4,7 +4,7 @@
/* */
/* FreeType outline management (body). */
/* */
-/* Copyright 1996-2001, 2002, 2003 by */
+/* Copyright 1996-2001, 2002, 2003, 2004 by */
/* David Turner, Robert Wilhelm, and Werner Lemberg. */
/* */
/* This file is part of the FreeType project, and may only be used, */
--- a/src/base/ftstroke.c
+++ b/src/base/ftstroke.c
@@ -4,7 +4,7 @@
/* */
/* FreeType path stroker (body). */
/* */
-/* Copyright 2002, 2003 by */
+/* Copyright 2002, 2003, 2004 by */
/* David Turner, Robert Wilhelm, and Werner Lemberg. */
/* */
/* This file is part of the FreeType project, and may only be used, */
@@ -1416,8 +1416,7 @@
FT_Angle turn;
FT_Int inside_side;
- /* close the path if needed
- */
+ /* close the path if needed */
if ( stroker->center.x != stroker->subpath_start.x ||
stroker->center.y != stroker->subpath_start.y )
{
@@ -1747,9 +1746,9 @@
}
-
extern const FT_Glyph_Class ft_outline_glyph_class;
+
FT_EXPORT_DEF( FT_Error )
FT_Glyph_Stroke( FT_Glyph *pglyph,
FT_Stroker stroker,
@@ -1758,6 +1757,7 @@
FT_Error error = FT_Err_Invalid_Argument;
FT_Glyph glyph = NULL;
+
if ( pglyph == NULL )
goto Exit;
@@ -1768,6 +1768,7 @@
{
FT_Glyph copy;
+
error = FT_Glyph_Copy( glyph, &copy );
if ( error )
goto Exit;
@@ -1780,8 +1781,9 @@
FT_Outline* outline = &oglyph->outline;
FT_UInt num_points, num_contours;
+
error = FT_Stroker_ParseOutline( stroker, outline, 0 );
- if (error)
+ if ( error )
goto Fail;
(void)FT_Stroker_GetCounts( stroker, &num_points, &num_contours );
@@ -1788,7 +1790,8 @@
FT_Outline_Done( glyph->library, outline );
- error = FT_Outline_New( glyph->library, num_points, num_contours, outline );
+ error = FT_Outline_New( glyph->library,
+ num_points, num_contours, outline );
if ( error )
goto Fail;
@@ -1825,6 +1828,7 @@
FT_Error error = FT_Err_Invalid_Argument;
FT_Glyph glyph = NULL;
+
if ( pglyph == NULL )
goto Exit;
@@ -1835,6 +1839,7 @@
{
FT_Glyph copy;
+
error = FT_Glyph_Copy( glyph, &copy );
if ( error )
goto Exit;
@@ -1848,12 +1853,13 @@
FT_Outline* outline = &oglyph->outline;
FT_UInt num_points, num_contours;
+
border = FT_Outline_GetOutsideBorder( outline );
if ( inside )
- border = 1-border;
+ border = 1 - border;
error = FT_Stroker_ParseOutline( stroker, outline, 0 );
- if (error)
+ if ( error )
goto Fail;
(void)FT_Stroker_GetBorderCounts( stroker, border,
@@ -1890,7 +1896,6 @@
Exit:
return error;
}
-
/* END */
--- a/src/cache/ftcmanag.c
+++ b/src/cache/ftcmanag.c
@@ -4,7 +4,7 @@
/* */
/* FreeType Cache Manager (body). */
/* */
-/* Copyright 2000-2001, 2002, 2003 by */
+/* Copyright 2000-2001, 2002, 2003, 2004 by */
/* David Turner, Robert Wilhelm, and Werner Lemberg. */
/* */
/* This file is part of the FreeType project, and may only be used, */
--- a/src/cff/cffdrivr.c
+++ b/src/cff/cffdrivr.c
@@ -29,6 +29,7 @@
#include "cffdrivr.h"
#include "cffgload.h"
#include "cffload.h"
+#include "cffcmap.h"
#include "cfferrs.h"
@@ -340,22 +341,40 @@
/*
- * (empty) TT CMAP INFO
+ * TT CMAP INFO
*
- * Hide TT CMAP INFO service defined in SFNT module;
- * just return 0.
+ * If the charmap is a synthetic Unicode encoding cmap or
+ * a Type 1 standard (or expert) encoding cmap, hide TT CMAP INFO
+ * service defined in SFNT module.
*
+ * Otherwise call the service function in the sfnt module.
+ *
*/
-
static FT_Error
cff_get_cmap_info( FT_CharMap charmap,
TT_CMapInfo *cmap_info )
{
- FT_UNUSED( charmap );
+ FT_CMap cmap = FT_CMAP( charmap );
+ FT_Error error = CFF_Err_Ok;
+
cmap_info->language = 0;
- return CFF_Err_Ok;
+ if ( cmap->clazz != &cff_cmap_encoding_class_rec &&
+ cmap->clazz != &cff_cmap_unicode_class_rec )
+ {
+ FT_Face face = FT_CMAP_FACE( cmap );
+ FT_Library library = FT_FACE_LIBRARY( face );
+ FT_Module sfnt = FT_Get_Module( library, "sfnt" );
+ FT_Service_TTCMaps service = ft_module_get_service (
+ sfnt, FT_SERVICE_ID_TT_CMAP );
+
+
+ if ( service && service->get_cmap_info )
+ error = service->get_cmap_info( charmap, cmap_info );
+ }
+
+ return error;
}
--- a/src/cff/cffload.c
+++ b/src/cff/cffload.c
@@ -4,7 +4,7 @@
/* */
/* OpenType and CFF data/program tables loader (body). */
/* */
-/* Copyright 1996-2001, 2002, 2003 by */
+/* Copyright 1996-2001, 2002, 2003, 2004 by */
/* David Turner, Robert Wilhelm, and Werner Lemberg. */
/* */
/* This file is part of the FreeType project, and may only be used, */
--- a/src/cid/cidparse.c
+++ b/src/cid/cidparse.c
@@ -4,7 +4,7 @@
/* */
/* CID-keyed Type1 parser (body). */
/* */
-/* Copyright 1996-2001, 2002, 2003 by */
+/* Copyright 1996-2001, 2002, 2003, 2004 by */
/* David Turner, Robert Wilhelm, and Werner Lemberg. */
/* */
/* This file is part of the FreeType project, and may only be used, */
@@ -59,6 +59,7 @@
FT_Byte buffer[256 + 10];
FT_Int buff_len;
FT_Byte *cur, *limit;
+ FT_Byte *arg1, *arg2;
FT_MEM_ZERO( parser, sizeof ( *parser ) );
@@ -135,16 +136,26 @@
parser->root.limit = parser->root.cursor + ps_len;
parser->num_dict = -1;
- /* finally we check whether `StartData' was real -- it could be */
- /* in a comment or string */
+ /* Finally, we check whether `StartData' was real -- it could be */
+ /* in a comment or string. We also get its arguments to find out */
+ /* whether the data is represented in binary or hex format. */
limit = parser->root.limit;
cur = parser->root.cursor;
+ arg1 = cur;
+ cid_parser_skip_PS_token( parser );
+ cid_parser_skip_spaces ( parser );
+ arg2 = cur;
+ cid_parser_skip_PS_token( parser );
+ cid_parser_skip_spaces ( parser );
+
while ( cur < limit )
{
if ( *cur == 'S' && ft_strncmp( (char*)cur, "StartData", 9 ) == 0 )
{
+ if ( ft_strncmp( (char*)arg1, "(Hex)", 5 ) == 0 )
+ parser->data_type = 1;
limit = parser->root.limit;
cur = parser->root.cursor;
goto Exit;
@@ -152,7 +163,9 @@
cid_parser_skip_PS_token( parser );
cid_parser_skip_spaces ( parser );
- cur = parser->root.cursor;
+ arg1 = arg2;
+ arg2 = cur;
+ cur = parser->root.cursor;
}
/* we haven't found the correct `StartData'; go back and continue */
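
For context, the `StartData' operator in a CID-keyed font is preceded
by two arguments -- the data format and its length -- which is exactly
what the arg1/arg2 sliding window above tracks.  Typical lines look
like this (layout per the Adobe CIDFont file format; the byte counts
are made up for illustration):

    (Binary) 26532 StartData    % data follows in binary form
    (Hex)    53064 StartData    % data follows as hexadecimal digits

When arg1 turns out to be `(Hex)', the parser sets `data_type' so that
later code knows to decode the hexadecimal representation.
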
--- a/src/cid/cidparse.h
+++ b/src/cid/cidparse.h
@@ -4,7 +4,7 @@
/* */
/* CID-keyed Type1 parser (specification). */
/* */
-/* Copyright 1996-2001, 2002, 2003 by */
+/* Copyright 1996-2001, 2002, 2003, 2004 by */
/* David Turner, Robert Wilhelm, and Werner Lemberg. */
/* */
/* This file is part of the FreeType project, and may only be used, */
@@ -50,6 +50,9 @@
/* data_offset :: The start position of the binary data (i.e., the */
/* end of the data to be parsed. */
/* */
+ /* data_type :: If true, the binary data is represented in */
+ /* hexadecimal format. */
+ /* */
/* cid :: A structure which holds the information about */
/* the current font. */
/* */
@@ -64,6 +67,7 @@
FT_Long postscript_len;
FT_ULong data_offset;
+ FT_Bool data_type;
CID_FaceInfo cid;
FT_Int num_dict;
--- a/src/sfnt/ttsbit.c
+++ b/src/sfnt/ttsbit.c
@@ -4,7 +4,7 @@
/* */
/* TrueType and OpenType embedded bitmap support (body). */
/* */
-/* Copyright 1996-2001, 2002, 2003 by */
+/* Copyright 1996-2001, 2002, 2003, 2004 by */
/* David Turner, Robert Wilhelm, and Werner Lemberg. */
/* */
/* This file is part of the FreeType project, and may only be used, */
@@ -636,7 +636,7 @@
for ( i = 0; i < face->num_sbit_strikes; i++ )
{
if ( ( (FT_UInt)face->sbit_strikes[i].y_ppem == y_ppem ) &&
- ( ( x_ppem == 0 ) ||
+ ( ( x_ppem == 0 ) ||
( (FT_UInt)face->sbit_strikes[i].x_ppem == x_ppem ) ) )
{
*astrike_index = i;
--- a/src/tools/docmaker/content.py
+++ b/src/tools/docmaker/content.py
@@ -1,585 +1,585 @@
-#
-# this file contains routines used to parse the content of documentation
-# comment block and build a more structured objects out of them
-#
-
-from sources import *
-from utils import *
-import string, re
-
-
-# this regular expresion is used to detect code sequences. these
-# are simply code fragments embedded in '{' and '}' like in:
-#
-# {
-# x = y + z;
-# if ( zookoo == 2 )
-# {
-# foobar();
-# }
-# }
-#
-# note that identation of the starting and ending accolades must be
-# exactly the same. the code sequence can contain accolades at greater
-# indentation
-#
-re_code_start = re.compile( r"(\s*){\s*$" )
-re_code_end = re.compile( r"(\s*)}\s*$" )
-
-
-# this regular expression is used to isolate identifiers from
-# other text
-#
-re_identifier = re.compile( r'(\w*)' )
-
-
-#############################################################################
-#
-# The DocCode class is used to store source code lines.
-#
-# 'self.lines' contains a set of source code lines that will be dumped as
-# HTML in a <PRE> tag.
-#
-# The object is filled line by line by the parser; it strips the leading
-# "margin" space from each input line before storing it in 'self.lines'.
-#
-class DocCode:
-
- def __init__( self, margin, lines ):
- self.lines = []
- self.words = None
-
- # remove margin spaces
- for l in lines:
- if string.strip( l[:margin] ) == "":
- l = l[margin:]
- self.lines.append( l )
-
- def dump( self, prefix = "", width=60 ):
- lines = self.dump_lines( 0, width )
- for l in lines:
- print prefix + l
-
- def dump_lines( self, margin=0, width=60 ):
- result = []
- for l in self.lines:
- result.append( " "*margin + l )
- return result
-
-
-
-#############################################################################
-#
-# The DocPara class is used to store "normal" text paragraph.
-#
-# 'self.words' contains the list of words that make up the paragraph
-#
-class DocPara:
-
- def __init__( self, lines ):
- self.lines = None
- self.words = []
- for l in lines:
- l = string.strip(l)
- self.words.extend( string.split( l ) )
-
- def dump( self, prefix = "", width = 60 ):
- lines = self.dump_lines( 0, width )
- for l in lines:
- print prefix + l
-
- def dump_lines( self, margin=0, width = 60 ):
- cur = "" # current line
- col = 0 # current width
- result = []
-
- for word in self.words:
- ln = len(word)
- if col > 0:
- ln = ln+1
-
- if col + ln > width:
- result.append( " "*margin + cur )
- cur = word
- col = len(word)
- else:
- if col > 0:
- cur = cur + " "
- cur = cur + word
- col = col + ln
-
- if col > 0:
- result.append( " "*margin + cur )
-
- return result
-
-
-
-
-#############################################################################
-#
-# The DocField class is used to store a list containing either DocPara or
-# DocCode objects. Each DocField also has an optional "name" which is used
-# when the object corresponds to a field of value definition
-#
-class DocField:
-
- def __init__( self, name, lines ):
-
- self.name = name # can be None for normal paragraphs/sources
- self.items = [] # list of items
-
- mode_none = 0 # start parsing mode
- mode_code = 1 # parsing code sequences
- mode_para = 3 # parsing normal paragraph
-
- margin = -1 # current code sequence indentation
- cur_lines = []
-
- # now analyze the markup lines to see if they contain paragraphs,
- # code sequences or fields definitions
- #
- start = 0
- mode = mode_none
- for l in lines:
-
- # are we parsing a code sequence ?
- if mode == mode_code:
-
- m = re_code_end.match( l )
- if m and len(m.group(1)) <= margin:
- # that's it, we finised the code sequence
- code = DocCode( 0, cur_lines )
- self.items.append( code )
- margin = -1
- cur_lines = []
- mode = mode_none
- else:
- # nope, continue the code sequence
- cur_lines.append( l[margin:] )
- else:
- # start of code sequence ?
- m = re_code_start.match( l )
- if m:
- # save current lines
- if cur_lines:
- para = DocPara( cur_lines )
- self.items.append( para )
- cur_lines = []
-
- # switch to code extraction mode
- margin = len(m.group(1))
- mode = mode_code
-
- else:
- if not string.split( l ) and cur_lines:
- # if the line is empty, we end the current paragraph,
- # if any
- para = DocPara( cur_lines )
- self.items.append( para )
- cur_lines = []
- else:
- # otherwise, simply add the line to the current
- # paragraph
- cur_lines.append( l )
-
- if mode == mode_code:
- # unexpected end of code sequence
- code = DocCode( margin, cur_lines )
- self.items.append( code )
-
- elif cur_lines:
- para = DocPara( cur_lines )
- self.items.append( para )
-
- def dump( self, prefix = "" ):
- if self.field:
- print prefix + self.field + " ::"
- prefix = prefix + "----"
-
- first = 1
- for p in self.items:
- if not first:
- print ""
- p.dump( prefix )
- first = 0
-
- def dump_lines( self, margin=0, width=60 ):
- result = []
- nl = None
- for p in self.items:
- if nl:
- result.append( "" )
-
- result.extend( p.dump_lines( margin, width ) )
- nl = 1
-
- return result
-
-# this regular expression is used to detect field definitions
-#
-re_field = re.compile( r"\s*(\w*)\s*::" )
-
-
-
-class DocMarkup:
-
- def __init__( self, tag, lines ):
- self.tag = string.lower(tag)
- self.fields = []
-
- cur_lines = []
- field = None
- mode = 0
-
- for l in lines:
- m = re_field.match( l )
- if m:
- # we detected the start of a new field definition
-
- # first, save the current one
- if cur_lines:
- f = DocField( field, cur_lines )
- self.fields.append( f )
- cur_lines = []
- field = None
-
- field = m.group(1) # record field name
- ln = len(m.group(0))
- l = " "*ln + l[ln:]
- cur_lines = [ l ]
- else:
- cur_lines.append( l )
-
- if field or cur_lines:
- f = DocField( field, cur_lines )
- self.fields.append( f )
-
- def get_name( self ):
- try:
- return self.fields[0].items[0].words[0]
-
- except:
- return None
-
- def get_start( self ):
- try:
- result = ""
- for word in self.fields[0].items[0].words:
- result = result + " " + word
- return result[1:]
-
- except:
- return "ERROR"
-
- def dump( self, margin ):
- print " "*margin + "<" + self.tag + ">"
- for f in self.fields:
- f.dump( " " )
- print " "*margin + "</" + self.tag + ">"
-
-
-
-
-class DocChapter:
-
- def __init__( self, block ):
- self.block = block
- self.sections = []
- if block:
- self.name = block.name
- self.title = block.get_markup_words( "title" )
- self.order = block.get_markup_words( "sections" )
- else:
- self.name = "Other"
- self.title = string.split( "Miscellaneous" )
- self.order = []
-
-
-
-class DocSection:
-
- def __init__( self, name = "Other" ):
- self.name = name
- self.blocks = {}
- self.block_names = [] # ordered block names in section
- self.defs = []
- self.abstract = ""
- self.description = ""
- self.order = []
- self.title = "ERROR"
- self.chapter = None
-
- def add_def( self, block ):
- self.defs.append( block )
-
- def add_block( self, block ):
- self.block_names.append( block.name )
- self.blocks[ block.name ] = block
-
- def process( self ):
- # lookup one block that contains a valid section description
- for block in self.defs:
- title = block.get_markup_text( "Title" )
- if title:
- self.title = title
- self.abstract = block.get_markup_words( "abstract" )
- self.description = block.get_markup_items( "description" )
- self.order = block.get_markup_words( "order" )
- return
-
- def reorder( self ):
-
- self.block_names = sort_order_list( self.block_names, self.order )
-
-
-class ContentProcessor:
-
- def __init__( self ):
- """initialize a block content processor"""
- self.reset()
-
- self.sections = {} # dictionary of documentation sections
- self.section = None # current documentation section
-
- self.chapters = [] # list of chapters
-
- def set_section( self, section_name ):
- """set current section during parsing"""
- if not self.sections.has_key( section_name ):
- section = DocSection( section_name )
- self.sections[ section_name ] = section
- self.section = section
- else:
- self.section = self.sections[ section_name ]
-
- def add_chapter( self, block ):
- chapter = DocChapter( block )
- self.chapters.append( chapter )
-
-
- def reset( self ):
- """reset the content processor for a new block"""
- self.markups = []
- self.markup = None
- self.markup_lines = []
-
- def add_markup( self ):
- """add a new markup section"""
- if self.markup and self.markup_lines:
-
- # get rid of last line of markup if it's empty
- marks = self.markup_lines
- if len(marks) > 0 and not string.strip(marks[-1]):
- self.markup_lines = marks[:-1]
-
- m = DocMarkup( self.markup, self.markup_lines )
-
- self.markups.append( m )
-
- self.markup = None
- self.markup_lines = []
-
-
- def process_content( self, content ):
- """process a block content and return a list of DocMarkup objects
- corresponding to it"""
- markup = None
- markup_lines = []
- first = 1
-
- for line in content:
- found = None
- for t in re_markup_tags:
- m = t.match( line )
- if m:
- found = string.lower(m.group(1))
- prefix = len(m.group(0))
- line = " "*prefix + line[prefix:] # remove markup from line
- break
-
- # is it the start of a new markup section ?
- if found:
- first = 0
- self.add_markup() # add current markup content
- self.markup = found
- if len(string.strip( line )) > 0:
- self.markup_lines.append( line )
- elif first == 0:
- self.markup_lines.append( line )
-
- self.add_markup()
-
- return self.markups
-
-
- def parse_sources( self, source_processor ):
- blocks = source_processor.blocks
- count = len(blocks)
- for n in range(count):
-
- source = blocks[n]
- if source.content:
- # this is a documentation comment, we need to catch
- # all following normal blocks in the "follow" list
- #
- follow = []
- m = n+1
- while m < count and not blocks[m].content:
- follow.append( blocks[m] )
- m = m+1
-
- doc_block = DocBlock( source, follow, self )
-
-
- def finish( self ):
-
- # process all sections to extract their abstract, description
- # and ordered list of items
- #
- for sec in self.sections.values():
- sec.process()
-
- # process chapters to check that all sections are correctly
- # listed there
- for chap in self.chapters:
- for sec in chap.order:
- if self.sections.has_key(sec):
- section = self.sections[ sec ]
- section.chapter = chap
- section.reorder()
- chap.sections.append( section )
- else:
- sys.stderr.write( "WARNING: chapter '" +
- chap.name + "' in " + chap.block.location() + \
- " lists unknown section '" + sec + "'\n" )
-
- # check that all sections are in a chapter
- #
- others = []
- for sec in self.sections.values():
- if not sec.chapter:
- others.append(sec)
-
- # create a new special chapter for all remaining sections
- # when necessary
- #
- if others:
- chap = DocChapter( None )
- chap.sections = others
- self.chapters.append( chap )
-
-
-
-class DocBlock:
-
- def __init__( self, source, follow, processor ):
-
- processor.reset()
-
- self.source = source
- self.code = []
- self.type = "ERRTYPE"
- self.name = "ERRNAME"
- self.section = processor.section
- self.markups = processor.process_content( source.content )
-
- # compute block type from first markup tag
- try:
- self.type = self.markups[0].tag
- except:
- pass
-
-
- # compute block name from first markup paragraph
- try:
- markup = self.markups[0]
- para = markup.fields[0].items[0]
- name = para.words[0]
- m = re_identifier.match( name )
- if m:
- name = m.group(1)
- self.name = name
- except:
- pass
-
- # detect new section starts
- if self.type == "section":
- processor.set_section( self.name )
- processor.section.add_def( self )
-
- # detect new chapter
- elif self.type == "chapter":
- processor.add_chapter( self )
-
- else:
- processor.section.add_block( self )
-
- # now, compute the source lines relevant to this documentation
- # block. We keep normal comments in for obvious reasons (??)
- source = []
- for b in follow:
- if b.format:
- break
- for l in b.lines:
- # we use "/* */" as a separator
- if re_source_sep.match( l ):
- break
- source.append( l )
-
- # now strip the leading and trailing empty lines from the sources
- start = 0
- end = len( source )-1
-
- while start < end and not string.strip( source[start] ):
- start = start + 1
-
- while start < end and not string.strip( source[end] ):
- end = end - 1
-
- source = source[start:end+1]
-
- self.code = source
-
-
- def location( self ):
- return self.source.location()
-
-
-
- def get_markup( self, tag_name ):
- """return the DocMarkup corresponding to a given tag in a block"""
- for m in self.markups:
- if m.tag == string.lower(tag_name):
- return m
- return None
-
-
- def get_markup_name( self, tag_name ):
- """return the name of a given primary markup in a block"""
- try:
- m = self.get_markup( tag_name )
- return m.get_name()
- except:
- return None
-
-
- def get_markup_words( self, tag_name ):
- try:
- m = self.get_markup( tag_name )
- return m.fields[0].items[0].words
- except:
- return []
-
-
- def get_markup_text( self, tag_name ):
- result = self.get_markup_words( tag_name )
- return string.join( result )
-
-
- def get_markup_items( self, tag_name ):
- try:
- m = self.get_markup( tag_name )
- return m.fields[0].items
- except:
- return None
\ No newline at end of file
+#
+# this file contains routines used to parse the content of documentation
+# comment block and build a more structured objects out of them
+#
+
+from sources import *
+from utils import *
+import string, re
+
+
+# this regular expresion is used to detect code sequences. these
+# are simply code fragments embedded in '{' and '}' like in:
+#
+# {
+# x = y + z;
+# if ( zookoo == 2 )
+# {
+# foobar();
+# }
+# }
+#
+# note that identation of the starting and ending accolades must be
+# exactly the same. the code sequence can contain accolades at greater
+# indentation
+#
+re_code_start = re.compile( r"(\s*){\s*$" )
+re_code_end = re.compile( r"(\s*)}\s*$" )
+
+
+# this regular expression is used to isolate identifiers from
+# other text
+#
+re_identifier = re.compile( r'(\w*)' )
+
+
+#############################################################################
+#
+# The DocCode class is used to store source code lines.
+#
+# 'self.lines' contains a set of source code lines that will be dumped as
+# HTML in a <PRE> tag.
+#
+# The object is filled line by line by the parser; it strips the leading
+# "margin" space from each input line before storing it in 'self.lines'.
+#
+class DocCode:
+
+ def __init__( self, margin, lines ):
+ self.lines = []
+ self.words = None
+
+ # remove margin spaces
+ for l in lines:
+ if string.strip( l[:margin] ) == "":
+ l = l[margin:]
+ self.lines.append( l )
+
+ def dump( self, prefix = "", width=60 ):
+ lines = self.dump_lines( 0, width )
+ for l in lines:
+ print prefix + l
+
+ def dump_lines( self, margin=0, width=60 ):
+ result = []
+ for l in self.lines:
+ result.append( " "*margin + l )
+ return result
+
+
+
+#############################################################################
+#
+# The DocPara class is used to store "normal" text paragraph.
+#
+# 'self.words' contains the list of words that make up the paragraph
+#
+class DocPara:
+
+ def __init__( self, lines ):
+ self.lines = None
+ self.words = []
+ for l in lines:
+ l = string.strip(l)
+ self.words.extend( string.split( l ) )
+
+ def dump( self, prefix = "", width = 60 ):
+ lines = self.dump_lines( 0, width )
+ for l in lines:
+ print prefix + l
+
+ def dump_lines( self, margin=0, width = 60 ):
+ cur = "" # current line
+ col = 0 # current width
+ result = []
+
+ for word in self.words:
+ ln = len(word)
+ if col > 0:
+ ln = ln+1
+
+ if col + ln > width:
+ result.append( " "*margin + cur )
+ cur = word
+ col = len(word)
+ else:
+ if col > 0:
+ cur = cur + " "
+ cur = cur + word
+ col = col + ln
+
+ if col > 0:
+ result.append( " "*margin + cur )
+
+ return result
+
+
+
+
+#############################################################################
+#
+# The DocField class is used to store a list containing either DocPara or
+# DocCode objects. Each DocField also has an optional "name" which is used
+# when the object corresponds to a field of value definition
+#
+class DocField:
+
+ def __init__( self, name, lines ):
+
+ self.name = name # can be None for normal paragraphs/sources
+ self.items = [] # list of items
+
+ mode_none = 0 # start parsing mode
+ mode_code = 1 # parsing code sequences
+ mode_para = 3 # parsing normal paragraph
+
+ margin = -1 # current code sequence indentation
+ cur_lines = []
+
+ # now analyze the markup lines to see if they contain paragraphs,
+ # code sequences or fields definitions
+ #
+ start = 0
+ mode = mode_none
+ for l in lines:
+
+ # are we parsing a code sequence ?
+ if mode == mode_code:
+
+ m = re_code_end.match( l )
+ if m and len(m.group(1)) <= margin:
+ # that's it, we finised the code sequence
+ code = DocCode( 0, cur_lines )
+ self.items.append( code )
+ margin = -1
+ cur_lines = []
+ mode = mode_none
+ else:
+ # nope, continue the code sequence
+ cur_lines.append( l[margin:] )
+ else:
+ # start of code sequence ?
+ m = re_code_start.match( l )
+ if m:
+ # save current lines
+ if cur_lines:
+ para = DocPara( cur_lines )
+ self.items.append( para )
+ cur_lines = []
+
+ # switch to code extraction mode
+ margin = len(m.group(1))
+ mode = mode_code
+
+ else:
+ if not string.split( l ) and cur_lines:
+ # if the line is empty, we end the current paragraph,
+ # if any
+ para = DocPara( cur_lines )
+ self.items.append( para )
+ cur_lines = []
+ else:
+ # otherwise, simply add the line to the current
+ # paragraph
+ cur_lines.append( l )
+
+ if mode == mode_code:
+ # unexpected end of code sequence
+ code = DocCode( margin, cur_lines )
+ self.items.append( code )
+
+ elif cur_lines:
+ para = DocPara( cur_lines )
+ self.items.append( para )
+
+ def dump( self, prefix = "" ):
+ if self.field:
+ print prefix + self.field + " ::"
+ prefix = prefix + "----"
+
+ first = 1
+ for p in self.items:
+ if not first:
+ print ""
+ p.dump( prefix )
+ first = 0
+
+ def dump_lines( self, margin=0, width=60 ):
+ result = []
+ nl = None
+ for p in self.items:
+ if nl:
+ result.append( "" )
+
+ result.extend( p.dump_lines( margin, width ) )
+ nl = 1
+
+ return result
+
+# this regular expression is used to detect field definitions
+#
+re_field = re.compile( r"\s*(\w*)\s*::" )
+
+
+
+class DocMarkup:
+
+ def __init__( self, tag, lines ):
+ self.tag = string.lower(tag)
+ self.fields = []
+
+ cur_lines = []
+ field = None
+ mode = 0
+
+ for l in lines:
+ m = re_field.match( l )
+ if m:
+ # we detected the start of a new field definition
+
+ # first, save the current one
+ if cur_lines:
+ f = DocField( field, cur_lines )
+ self.fields.append( f )
+ cur_lines = []
+ field = None
+
+ field = m.group(1) # record field name
+ ln = len(m.group(0))
+ l = " "*ln + l[ln:]
+ cur_lines = [ l ]
+ else:
+ cur_lines.append( l )
+
+ if field or cur_lines:
+ f = DocField( field, cur_lines )
+ self.fields.append( f )
+
+ def get_name( self ):
+ try:
+ return self.fields[0].items[0].words[0]
+
+ except:
+ return None
+
+ def get_start( self ):
+ try:
+ result = ""
+ for word in self.fields[0].items[0].words:
+ result = result + " " + word
+ return result[1:]
+
+ except:
+ return "ERROR"
+
+ def dump( self, margin ):
+ print " "*margin + "<" + self.tag + ">"
+ for f in self.fields:
+ f.dump( " " )
+ print " "*margin + "</" + self.tag + ">"
+
+
+
+
+class DocChapter:
+
+ def __init__( self, block ):
+ self.block = block
+ self.sections = []
+ if block:
+ self.name = block.name
+ self.title = block.get_markup_words( "title" )
+ self.order = block.get_markup_words( "sections" )
+ else:
+ self.name = "Other"
+ self.title = string.split( "Miscellaneous" )
+ self.order = []
+
+
+
+class DocSection:
+
+ def __init__( self, name = "Other" ):
+ self.name = name
+ self.blocks = {}
+ self.block_names = [] # ordered block names in section
+ self.defs = []
+ self.abstract = ""
+ self.description = ""
+ self.order = []
+ self.title = "ERROR"
+ self.chapter = None
+
+ def add_def( self, block ):
+ self.defs.append( block )
+
+ def add_block( self, block ):
+ self.block_names.append( block.name )
+ self.blocks[ block.name ] = block
+
+ def process( self ):
+ # lookup one block that contains a valid section description
+ for block in self.defs:
+ title = block.get_markup_text( "Title" )
+ if title:
+ self.title = title
+ self.abstract = block.get_markup_words( "abstract" )
+ self.description = block.get_markup_items( "description" )
+ self.order = block.get_markup_words( "order" )
+ return
+
+ def reorder( self ):
+
+ self.block_names = sort_order_list( self.block_names, self.order )
+
+
+class ContentProcessor:
+
+ def __init__( self ):
+ """initialize a block content processor"""
+ self.reset()
+
+ self.sections = {} # dictionary of documentation sections
+ self.section = None # current documentation section
+
+ self.chapters = [] # list of chapters
+
+ def set_section( self, section_name ):
+ """set current section during parsing"""
+ if not self.sections.has_key( section_name ):
+ section = DocSection( section_name )
+ self.sections[ section_name ] = section
+ self.section = section
+ else:
+ self.section = self.sections[ section_name ]
+
+ def add_chapter( self, block ):
+ chapter = DocChapter( block )
+ self.chapters.append( chapter )
+
+
+ def reset( self ):
+ """reset the content processor for a new block"""
+ self.markups = []
+ self.markup = None
+ self.markup_lines = []
+
+ def add_markup( self ):
+ """add a new markup section"""
+ if self.markup and self.markup_lines:
+
+ # get rid of last line of markup if it's empty
+ marks = self.markup_lines
+ if len(marks) > 0 and not string.strip(marks[-1]):
+ self.markup_lines = marks[:-1]
+
+ m = DocMarkup( self.markup, self.markup_lines )
+
+ self.markups.append( m )
+
+ self.markup = None
+ self.markup_lines = []
+
+
+ def process_content( self, content ):
+ """process a block content and return a list of DocMarkup objects
+ corresponding to it"""
+ markup = None
+ markup_lines = []
+ first = 1
+
+ for line in content:
+ found = None
+ for t in re_markup_tags:
+ m = t.match( line )
+ if m:
+ found = string.lower(m.group(1))
+ prefix = len(m.group(0))
+ line = " "*prefix + line[prefix:] # remove markup from line
+ break
+
+ # is it the start of a new markup section ?
+ if found:
+ first = 0
+ self.add_markup() # add current markup content
+ self.markup = found
+ if len(string.strip( line )) > 0:
+ self.markup_lines.append( line )
+ elif first == 0:
+ self.markup_lines.append( line )
+
+ self.add_markup()
+
+ return self.markups
+
+
+ def parse_sources( self, source_processor ):
+ blocks = source_processor.blocks
+ count = len(blocks)
+ for n in range(count):
+
+ source = blocks[n]
+ if source.content:
+ # this is a documentation comment, we need to catch
+ # all following normal blocks in the "follow" list
+ #
+ follow = []
+ m = n+1
+ while m < count and not blocks[m].content:
+ follow.append( blocks[m] )
+ m = m+1
+
+ doc_block = DocBlock( source, follow, self )
+
+
+ def finish( self ):
+
+ # process all sections to extract their abstract, description
+ # and ordered list of items
+ #
+ for sec in self.sections.values():
+ sec.process()
+
+ # process chapters to check that all sections are correctly
+ # listed there
+ for chap in self.chapters:
+ for sec in chap.order:
+ if self.sections.has_key(sec):
+ section = self.sections[ sec ]
+ section.chapter = chap
+ section.reorder()
+ chap.sections.append( section )
+ else:
+ sys.stderr.write( "WARNING: chapter '" +
+ chap.name + "' in " + chap.block.location() + \
+ " lists unknown section '" + sec + "'\n" )
+
+ # check that all sections are in a chapter
+ #
+ others = []
+ for sec in self.sections.values():
+ if not sec.chapter:
+ others.append(sec)
+
+ # create a new special chapter for all remaining sections
+ # when necessary
+ #
+ if others:
+ chap = DocChapter( None )
+ chap.sections = others
+ self.chapters.append( chap )
+
+
+
+class DocBlock:
+
+ def __init__( self, source, follow, processor ):
+
+ processor.reset()
+
+ self.source = source
+ self.code = []
+ self.type = "ERRTYPE"
+ self.name = "ERRNAME"
+ self.section = processor.section
+ self.markups = processor.process_content( source.content )
+
+ # compute block type from first markup tag
+ try:
+ self.type = self.markups[0].tag
+ except:
+ pass
+
+
+ # compute block name from first markup paragraph
+ try:
+ markup = self.markups[0]
+ para = markup.fields[0].items[0]
+ name = para.words[0]
+ m = re_identifier.match( name )
+ if m:
+ name = m.group(1)
+ self.name = name
+ except:
+ pass
+
+ # detect new section starts
+ if self.type == "section":
+ processor.set_section( self.name )
+ processor.section.add_def( self )
+
+ # detect new chapter
+ elif self.type == "chapter":
+ processor.add_chapter( self )
+
+ else:
+ processor.section.add_block( self )
+
+ # now, compute the source lines relevant to this documentation
+ # block. We keep normal comments in for obvious reasons (??)
+ source = []
+ for b in follow:
+ if b.format:
+ break
+ for l in b.lines:
+ # we use "/* */" as a separator
+ if re_source_sep.match( l ):
+ break
+ source.append( l )
+
+ # now strip the leading and trailing empty lines from the sources
+ start = 0
+ end = len( source )-1
+
+ while start < end and not string.strip( source[start] ):
+ start = start + 1
+
+ while start < end and not string.strip( source[end] ):
+ end = end - 1
+
+ source = source[start:end+1]
+
+ self.code = source
+
+
+ def location( self ):
+ return self.source.location()
+
+
+
+ def get_markup( self, tag_name ):
+ """return the DocMarkup corresponding to a given tag in a block"""
+ for m in self.markups:
+ if m.tag == string.lower(tag_name):
+ return m
+ return None
+
+
+ def get_markup_name( self, tag_name ):
+ """return the name of a given primary markup in a block"""
+ try:
+ m = self.get_markup( tag_name )
+ return m.get_name()
+ except:
+ return None
+
+
+ def get_markup_words( self, tag_name ):
+ try:
+ m = self.get_markup( tag_name )
+ return m.fields[0].items[0].words
+ except:
+ return []
+
+
+ def get_markup_text( self, tag_name ):
+ result = self.get_markup_words( tag_name )
+ return string.join( result )
+
+
+ def get_markup_items( self, tag_name ):
+ try:
+ m = self.get_markup( tag_name )
+ return m.fields[0].items
+ except:
+ return None
--- a/src/tools/docmaker/docbeauty.py
+++ b/src/tools/docmaker/docbeauty.py
@@ -1,109 +1,109 @@
-#!/usr/bin/env python
-#
-# DocBeauty 0.1 (c) 2003 David Turner <[email protected]>
-#
-# This program is used to beautify the documentation comments used
-# in the FreeType 2 public headers.
-#
-
-from sources import *
-from content import *
-from utils import *
-
-import utils
-
-import sys, os, time, string, getopt
-
-content_processor = ContentProcessor()
-
-
-def beautify_block( block ):
- if block.content:
- content_processor.reset()
-
- markups = content_processor.process_content( block.content )
- text = []
- first = 1
-
- for markup in markups:
- text.extend( markup.beautify( first ) )
- first = 0
-
- # now beautify the documentation "borders" themselves
- lines = [ " /*************************************************************************" ]
- for l in text:
- lines.append( " *" + l )
- lines.append( " */" )
-
- block.lines = lines
-
-
-def usage():
- print "\nDocBeauty 0.1 Usage information\n"
- print " docbeauty [options] file1 [ file2 ... ]\n"
- print "using the following options:\n"
- print " -h : print this page"
- print " -b : backup original files with the 'orig' extension"
- print ""
- print " --backup : same as -b"
-
-
-def main( argv ):
- """main program loop"""
-
- global output_dir
-
- try:
- opts, args = getopt.getopt( sys.argv[1:],
- "hb",
- [ "help", "backup" ] )
-
- except getopt.GetoptError:
- usage()
- sys.exit( 2 )
-
- if args == []:
- usage()
- sys.exit( 1 )
-
- # process options
- #
- output_dir = None
- do_backup = None
-
- for opt in opts:
- if opt[0] in ( "-h", "--help" ):
- usage()
- sys.exit( 0 )
-
- if opt[0] in ( "-b", "--backup" ):
- do_backup = 1
-
- # create context and processor
- source_processor = SourceProcessor()
-
- # retrieve the list of files to process
- file_list = make_file_list( args )
- for filename in file_list:
- source_processor.parse_file( filename )
- for block in source_processor.blocks:
- beautify_block( block )
- new_name = filename + ".new"
- ok = None
- try:
- file = open( new_name, "wt" )
- for block in source_processor.blocks:
- for line in block.lines:
- file.write( line )
- file.write( "\n" )
- file.close()
- except:
- ok = 0
-
-# if called from the command line
-#
-if __name__ == '__main__':
- main( sys.argv )
-
-
-# eof
+#!/usr/bin/env python
+#
+# DocBeauty (c) 2003, 2004 David Turner <[email protected]>
+#
+# This program is used to beautify the documentation comments used
+# in the FreeType 2 public headers.
+#
+
+from sources import *
+from content import *
+from utils import *
+
+import utils
+
+import sys, os, time, string, getopt
+
+content_processor = ContentProcessor()
+
+
+def beautify_block( block ):
+ if block.content:
+ content_processor.reset()
+
+ markups = content_processor.process_content( block.content )
+ text = []
+ first = 1
+
+ for markup in markups:
+ text.extend( markup.beautify( first ) )
+ first = 0
+
+ # now beautify the documentation "borders" themselves
+ lines = [ " /*************************************************************************" ]
+ for l in text:
+ lines.append( " *" + l )
+ lines.append( " */" )
+
+ block.lines = lines
+
+
+def usage():
+ print "\nDocBeauty 0.1 Usage information\n"
+ print " docbeauty [options] file1 [ file2 ... ]\n"
+ print "using the following options:\n"
+ print " -h : print this page"
+ print " -b : backup original files with the 'orig' extension"
+ print ""
+ print " --backup : same as -b"
+
+
+def main( argv ):
+ """main program loop"""
+
+ global output_dir
+
+ try:
+ opts, args = getopt.getopt( sys.argv[1:],
+ "hb",
+ [ "help", "backup" ] )
+
+ except getopt.GetoptError:
+ usage()
+ sys.exit( 2 )
+
+ if args == []:
+ usage()
+ sys.exit( 1 )
+
+ # process options
+ #
+ output_dir = None
+ do_backup = None
+
+ for opt in opts:
+ if opt[0] in ( "-h", "--help" ):
+ usage()
+ sys.exit( 0 )
+
+ if opt[0] in ( "-b", "--backup" ):
+ do_backup = 1
+
+ # create context and processor
+ source_processor = SourceProcessor()
+
+ # retrieve the list of files to process
+ file_list = make_file_list( args )
+ for filename in file_list:
+ source_processor.parse_file( filename )
+ for block in source_processor.blocks:
+ beautify_block( block )
+ new_name = filename + ".new"
+ ok = None
+ try:
+ file = open( new_name, "wt" )
+ for block in source_processor.blocks:
+ for line in block.lines:
+ file.write( line )
+ file.write( "\n" )
+ file.close()
+ except:
+ ok = 0
+
+# if called from the command line
+#
+if __name__ == '__main__':
+ main( sys.argv )
+
+
+# eof
--- a/src/tools/docmaker/docmaker.py
+++ b/src/tools/docmaker/docmaker.py
@@ -1,16 +1,16 @@
#!/usr/bin/env python
#
-# DocMaker 0.2 (c) 2002 David Turner <[email protected]>
+# DocMaker (c) 2002, 2004 David Turner <[email protected]>
#
# This program is a re-write of the original DocMaker took used
# to generate the API Reference of the FreeType font engine
-# by converting in-source comments into structured HTML
+# by converting in-source comments into structured HTML.
#
# This new version is capable of outputting XML data, as well
-# as accepts more liberal formatting options
+# as accepts more liberal formatting options.
#
# It also uses regular expression matching and substitution
-# to speed things significantly
+# to speed things significantly.
#
from sources import *
@@ -25,7 +25,7 @@
def usage():
- print "\nDocMaker 0.2 Usage information\n"
+ print "\nDocMaker Usage information\n"
print " docmaker [options] file1 [ file2 ... ]\n"
print "using the following options:\n"
print " -h : print this page"
--- a/src/tools/docmaker/formatter.py
+++ b/src/tools/docmaker/formatter.py
@@ -1,15 +1,15 @@
from sources import *
from content import *
from utils import *
-
-# This is the base Formatter class. its purpose is to convert
-# a content processor's data into specific documents (i.e. table of
-# contents, global index, and individual API reference indices).
-#
-# You'll need to sub-class it to output anything sensible. For example,
-# the file tohtml.py contains the definition of the HtmlFormatter sub-class
-# used to output, you guessed it, HTML !
-#
+
+# This is the base Formatter class. its purpose is to convert
+# a content processor's data into specific documents (i.e. table of
+# contents, global index, and individual API reference indices).
+#
+# You'll need to sub-class it to output anything sensible. For example,
+# the file tohtml.py contains the definition of the HtmlFormatter sub-class
+# used to output, you guessed it, HTML.
+#
class Formatter:
--- a/src/tools/docmaker/sources.py
+++ b/src/tools/docmaker/sources.py
@@ -223,7 +223,7 @@
if len(l) > 0:
for tag in re_markup_tags:
if tag.match( l ):
- self.content = lines
+ self.content = lines
return
def location( self ):
@@ -328,9 +328,9 @@
"""process a normal line and check if it's the start of a new block"""
for f in re_source_block_formats:
if f.start.match( line ):
- self.add_block_lines()
+ self.add_block_lines()
self.format = f
- self.lineno = fileinput.filelineno()
+ self.lineno = fileinput.filelineno()
self.lines.append( line )
--- a/src/tools/docmaker/utils.py
+++ b/src/tools/docmaker/utils.py
@@ -85,7 +85,7 @@
sys.exit( 2 )
else:
output_dir = None
-
+
def file_exists( pathname ):
"""checks that a given file exists"""
result = 1
--- a/src/truetype/ttgload.c
+++ b/src/truetype/ttgload.c
@@ -4,7 +4,7 @@
/* */
/* TrueType Glyph Loader (body). */
/* */
-/* Copyright 1996-2001, 2002, 2003 by */
+/* Copyright 1996-2001, 2002, 2003, 2004 by */
/* David Turner, Robert Wilhelm, and Werner Lemberg. */
/* */
/* This file is part of the FreeType project, and may only be used, */
--- a/src/truetype/ttinterp.c
+++ b/src/truetype/ttinterp.c
@@ -4,7 +4,7 @@
/* */
/* TrueType bytecode interpreter (body). */
/* */
-/* Copyright 1996-2001, 2002, 2003 by */
+/* Copyright 1996-2001, 2002, 2003, 2004 by */
/* David Turner, Robert Wilhelm, and Werner Lemberg. */
/* */
/* This file is part of the FreeType project, and may only be used, */