shithub: hugo

ref: 6636cf1bea77d20ef2a72a45fae59ac402fb133b
parent: f669ef6bec25155d015b6ab231c53caef4fa5cdc
author: Bjørn Erik Pedersen <[email protected]>
date: Tue Oct 23 10:37:09 EDT 2018

Resolve error handling/parser related TODOs

See #5324

--- a/hugofs/rootmapping_fs_test.go
+++ b/hugofs/rootmapping_fs_test.go
@@ -50,7 +50,7 @@
 	fif, err := rfs.Stat(filepath.Join("cf2", testfile))
 	assert.NoError(err)
 	assert.Equal("myfile.txt", fif.Name())
-	assert.Equal("f2t/myfile.txt", fif.(RealFilenameInfo).RealFilename())
+	assert.Equal(filepath.FromSlash("f2t/myfile.txt"), fif.(RealFilenameInfo).RealFilename())
 
 	root, err := rfs.Open(filepathSeparator)
 	assert.NoError(err)
--- a/hugolib/hugo_sites_build_errors_test.go
+++ b/hugolib/hugo_sites_build_errors_test.go
@@ -191,7 +191,7 @@
 			},
 			assertBuildError: func(a testSiteBuildErrorAsserter, err error) {
 				assert.Error(err)
-				assert.Contains(err.Error(), `"content/mytoml.md": render of "page" failed: execute of template failed: panic in Execute`)
+				assert.Contains(err.Error(), `execute of template failed: panic in Execute`)
 			},
 		},
 	}
--- a/hugolib/hugo_sites_build_test.go
+++ b/hugolib/hugo_sites_build_test.go
@@ -631,12 +631,10 @@
 	for _, p := range s.rawAllPages {
 		// No HTML when not processed
 		require.Equal(t, p.shouldBuild(), bytes.Contains(p.workContent, []byte("</")), p.BaseFileName()+": "+string(p.workContent))
-		// TODO(bep) 2errors
-		/*
-			require.Equal(t, p.shouldBuild(), p.content() != "", fmt.Sprintf("%v:%v", p.content(), p.shouldBuild()))
 
-			require.Equal(t, p.shouldBuild(), p.content() != "", p.BaseFileName())
-		*/
+		require.Equal(t, p.shouldBuild(), p.content() != "", fmt.Sprintf("%v:%v", p.content(), p.shouldBuild()))
+
+		require.Equal(t, p.shouldBuild(), p.content() != "", p.BaseFileName())
 
 	}
 }
--- a/hugolib/page.go
+++ b/hugolib/page.go
@@ -1685,9 +1685,13 @@
 	return found
 }
 
+// RawContent returns the un-rendered source content without
+// any leading front matter.
 func (p *Page) RawContent() string {
-	// TODO(bep) 2errors
-	return string(p.source.parsed.Input())
+	if p.source.posMainContent == -1 {
+		return ""
+	}
+	return string(p.source.parsed.Input()[p.source.posMainContent:])
 }
 
 func (p *Page) FullFilePath() string {
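
A note on the new contract: once the parser records the byte offset of the first item after the front matter, RawContent is a plain slice of the source, and -1 means there is nothing to slice. A minimal, runnable sketch of that idea; posAfterFrontMatter is a hypothetical stand-in, not Hugo's parser:

    package main

    import (
    	"bytes"
    	"fmt"
    )

    // posAfterFrontMatter is a hypothetical stand-in for what the parser
    // records in posMainContent: the byte offset just past the closing
    // front matter delimiter, or -1 when no main content position is known.
    func posAfterFrontMatter(input []byte) int {
    	delim := []byte("---\n")
    	if !bytes.HasPrefix(input, delim) {
    		return 0 // no front matter; content starts at the beginning
    	}
    	end := bytes.Index(input[len(delim):], delim)
    	if end == -1 {
    		return -1
    	}
    	return len(delim) + end + len(delim)
    }

    func main() {
    	src := []byte("---\ntitle: x\n---\n**Raw**")
    	pos := posAfterFrontMatter(src)
    	if pos == -1 {
    		fmt.Println(`""`)
    		return
    	}
    	fmt.Printf("%q\n", src[pos:]) // "**Raw**"
    }
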
--- a/hugolib/page_content.go
+++ b/hugolib/page_content.go
@@ -46,11 +46,11 @@
 	hasSummaryDivider bool
 
 	// The AST of the parsed page. Contains information about:
-	// shortcBackup3odes, front matter, summary indicators.
-	// TODO(bep) 2errors add this to a new rawPagecContent struct
-	// with frontMatterItem (pos) etc.
-	// * RawContent, RawContentWithoutFrontMatter
+	// shortcodes, front matter, summary indicators.
 	parsed pageparser.Result
+
+	// The position in bytes after any front matter.
+	posMainContent int
 }
 
 // TODO(bep) lazy consolidate
@@ -58,6 +58,7 @@
 	p.shortcodeState = newShortcodeHandler(p)
 	s := p.shortcodeState
 	p.renderable = true
+	p.source.posMainContent = -1
 
 	result := bp.GetBuffer()
 	defer bp.PutBuffer(result)
@@ -81,8 +82,8 @@
 		case it.Type == pageparser.TypeIgnore:
 		case it.Type == pageparser.TypeHTMLComment:
 			// Ignore. This is only a leading Front matter comment.
-		case it.Type == pageparser.TypeHTMLDocument:
-			// This is HTML only. No shortcode, front matter etc.
+		case it.Type == pageparser.TypeHTMLStart:
+			// This is HTML without front matter. It can still have shortcodes.
 			p.renderable = false
 			result.Write(it.Val)
 		case it.IsFrontMatter():
@@ -99,12 +100,17 @@
 				return err
 			}
 
+			next := iter.Peek()
+			if !next.IsDone() {
+				p.source.posMainContent = next.Pos
+			}
+
 			if !p.shouldBuild() {
 				// Nothing more to do.
 				return nil
 			}
 
-		case it.Type == pageparser.TypeLeadSummaryDivider, it.Type == pageparser.TypeSummaryDividerOrg:
+		case it.Type == pageparser.TypeLeadSummaryDivider:
 			result.Write(internalSummaryDivider)
 			p.source.hasSummaryDivider = true
 			// Need to determine if the page is truncated.
@@ -172,7 +178,6 @@
 		parsed: parseResult,
 	}
 
-	// TODO(bep) 2errors
 	p.lang = p.File.Lang()
 
 	if p.s != nil && p.s.owner != nil {
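
The posMainContent capture above hinges on the iterator's Peek: after the front matter item is handled, the next item is inspected without being consumed, and its position is recorded. A stand-alone sketch of that pattern (Item and Iterator here are hypothetical stand-ins, not pageparser's real types):

    package main

    import "fmt"

    // Item and Iterator are hypothetical stand-ins for pageparser's types,
    // kept just big enough to show the Peek pattern.
    type Item struct {
    	Type string
    	Pos  int
    }

    func (i Item) IsDone() bool { return i.Type == "EOF" }

    type Iterator struct {
    	items []Item
    	i     int
    }

    // Next consumes and returns the next item.
    func (it *Iterator) Next() Item { it.i++; return it.items[it.i-1] }

    // Peek returns the next item without consuming it.
    func (it *Iterator) Peek() Item { return it.items[it.i] }

    func main() {
    	iter := &Iterator{items: []Item{
    		{Type: "FrontMatterYAML", Pos: 0},
    		{Type: "Text", Pos: 17},
    		{Type: "EOF", Pos: 24},
    	}}

    	posMainContent := -1 // -1 until we know where the content starts
    	for item := iter.Next(); !item.IsDone(); item = iter.Next() {
    		if item.Type == "FrontMatterYAML" {
    			// Peek past the front matter: if anything follows, its
    			// position is where the main content begins.
    			if next := iter.Peek(); !next.IsDone() {
    				posMainContent = next.Pos
    			}
    		}
    	}
    	fmt.Println(posMainContent) // 17
    }
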
--- a/hugolib/page_test.go
+++ b/hugolib/page_test.go
@@ -767,8 +767,7 @@
 }
 
 // Issue #2601
-// TODO(bep) 2errors
-func _TestPageRawContent(t *testing.T) {
+func TestPageRawContent(t *testing.T) {
 	t.Parallel()
 	cfg, fs := newTestCfg()
 
@@ -784,7 +783,7 @@
 	require.Len(t, s.RegularPages, 1)
 	p := s.RegularPages[0]
 
-	require.Contains(t, p.RawContent(), "**Raw**")
+	require.Equal(t, p.RawContent(), "**Raw**")
 
 }
 
@@ -1042,8 +1041,7 @@
 	testAllMarkdownEnginesForPages(t, assertFunc, nil, simplePageWithAllCJKRunes)
 }
 
-// TODO(bep) 2errors
-func _TestWordCountWithAllCJKRunesHasCJKLanguage(t *testing.T) {
+func TestWordCountWithAllCJKRunesHasCJKLanguage(t *testing.T) {
 	t.Parallel()
 	settings := map[string]interface{}{"hasCJKLanguage": true}
 
@@ -1056,8 +1054,7 @@
 	testAllMarkdownEnginesForPages(t, assertFunc, settings, simplePageWithAllCJKRunes)
 }
 
-// TODO(bep) 2errors
-func _TestWordCountWithMainEnglishWithCJKRunes(t *testing.T) {
+func TestWordCountWithMainEnglishWithCJKRunes(t *testing.T) {
 	t.Parallel()
 	settings := map[string]interface{}{"hasCJKLanguage": true}
 
@@ -1164,7 +1161,6 @@
 		render bool
 	}{
 		{contentNoFrontmatter, true},
-		// TODO(bep) 2errors {invalidFrontmatterShortDelim, true},
 		{renderNoFrontmatter, false},
 		{contentWithCommentedFrontmatter, true},
 		{contentWithCommentedTextFrontmatter, true},
--- a/hugolib/site_render.go
+++ b/hugolib/site_render.go
@@ -134,8 +134,7 @@
 
 			if shouldRender {
 				if err := pageOutput.renderResources(); err != nil {
-					// TODO(bep) 2errors
-					s.Log.ERROR.Printf("Failed to render resources for page %q: %s", page, err)
+					s.SendError(page.errorf(err, "failed to render page resources"))
 					continue
 				}
 			}
@@ -147,7 +146,7 @@
 			} else {
 				layouts, err = s.layouts(pageOutput)
 				if err != nil {
-					s.Log.ERROR.Printf("Failed to resolve layout output %q for page %q: %s", outFormat.Name, page, err)
+					s.Log.ERROR.Printf("Failed to resolve layout for output %q for page %q: %s", outFormat.Name, page, err)
 					continue
 				}
 			}
--- a/hugolib/site_test.go
+++ b/hugolib/site_test.go
@@ -451,8 +451,7 @@
 
 }
 
-// TODO(bep) 2errors
-func _TestSkipRender(t *testing.T) {
+func TestSkipRender(t *testing.T) {
 	t.Parallel()
 	sources := [][2]string{
 		{filepath.FromSlash("sect/doc1.html"), "---\nmarkup: markdown\n---\n# title\nsome *content*"},
--- a/parser/pageparser/item.go
+++ b/parser/pageparser/item.go
@@ -103,10 +103,9 @@
 	tEOF
 
 	// page items
-	TypeHTMLDocument       // document starting with < as first non-whitespace
+	TypeHTMLStart          // document starting with < as first non-whitespace
 	TypeHTMLComment        // We ignore leading comments
-	TypeLeadSummaryDivider // <!--more-->
-	TypeSummaryDividerOrg  // # more
+	TypeLeadSummaryDivider // <!--more-->, # more
 	TypeFrontMatterYAML
 	TypeFrontMatterTOML
 	TypeFrontMatterJSON
--- a/parser/pageparser/pagelexer.go
+++ b/parser/pageparser/pagelexer.go
@@ -48,6 +48,8 @@
 	start      int // item start position
 	width      int // width of last element
 
+	// The summary divider to look for.
+	summaryDivider []byte
 	// Set when we have parsed any summary divider
 	summaryDividerChecked bool
 
@@ -69,7 +71,6 @@
 
 // note: the input position here is normally 0 (start), but
 // can be set if position of first shortcode is known
-// TODO(bep) 2errors byte
 func newPageLexer(input []byte, inputPosition int, stateStart stateFunc) *pageLexer {
 	lexer := &pageLexer{
 		input:      input,
@@ -117,7 +118,7 @@
 	delimTOML         = []byte("+++")
 	delimYAML         = []byte("---")
 	delimOrg          = []byte("#+")
-	htmlCOmmentStart  = []byte("<!--")
+	htmlCommentStart  = []byte("<!--")
 	htmlCOmmentEnd    = []byte("-->")
 )
 
@@ -195,17 +196,18 @@
 
 func lexMainSection(l *pageLexer) stateFunc {
 	// Fast forward as far as possible.
-	var l1, l2, l3 int
-	if !l.summaryDividerChecked {
-		// TODO(bep) 2errors make the summary divider per type
-		l1 = l.index(summaryDivider)
-		l2 = l.index(summaryDividerOrg)
-		if l1 == -1 && l2 == -1 {
+	var l1, l2 int
+
+	if !l.summaryDividerChecked && l.summaryDivider != nil {
+		l1 = l.index(l.summaryDivider)
+		if l1 == -1 {
 			l.summaryDividerChecked = true
 		}
 	}
-	l3 = l.index(leftDelimSc)
-	skip := minPositiveIndex(l1, l2, l3)
+
+	l2 = l.index(leftDelimSc)
+	skip := minPositiveIndex(l1, l2)
+
 	if skip > 0 {
 		l.pos += skip
 	}
@@ -225,23 +227,14 @@
 			return lexShortcodeLeftDelim
 		}
 
-		if !l.summaryDividerChecked {
-			if l.hasPrefix(summaryDivider) {
+		if !l.summaryDividerChecked && l.summaryDivider != nil {
+			if l.hasPrefix(l.summaryDivider) {
 				if l.pos > l.start {
 					l.emit(tText)
 				}
 				l.summaryDividerChecked = true
-				l.pos += len(summaryDivider)
-				//l.consumeCRLF()
+				l.pos += len(l.summaryDivider)
 				l.emit(TypeLeadSummaryDivider)
-			} else if l.hasPrefix(summaryDividerOrg) {
-				if l.pos > l.start {
-					l.emit(tText)
-				}
-				l.summaryDividerChecked = true
-				l.pos += len(summaryDividerOrg)
-				//l.consumeCRLF()
-				l.emit(TypeSummaryDividerOrg)
 			}
 		}
 
@@ -261,6 +254,8 @@
 }
 
 func lexIntroSection(l *pageLexer) stateFunc {
+	l.summaryDivider = summaryDivider
+
 LOOP:
 	for {
 		r := l.next()
@@ -283,7 +278,7 @@
 			// No front matter.
 			if r == '<' {
 				l.backup()
-				if l.hasPrefix(htmlCOmmentStart) {
+				if l.hasPrefix(htmlCommentStart) {
 					right := l.index(htmlCOmmentEnd)
 					if right == -1 {
 						return l.errorf("starting HTML comment with no end")
@@ -291,10 +286,14 @@
 					l.pos += right + len(htmlCOmmentEnd)
 					l.emit(TypeHTMLComment)
 				} else {
-					// Not need to look further. Hugo treats this as plain HTML,
-					// no front matter, no shortcodes, no nothing.
-					l.pos = len(l.input)
-					l.emit(TypeHTMLDocument)
+					if l.pos > l.start {
+						l.emit(tText)
+					}
+					l.next()
+					// This is the start of a plain HTML document with no
+					// front matter. It can still contain shortcodes, so we
+					// have to keep looking.
+					l.emit(TypeHTMLStart)
 				}
 			}
 			break LOOP
@@ -365,10 +364,11 @@
 		#+DESCRIPTION: Just another golang parser for org content!
 	*/
 
+	l.summaryDivider = summaryDividerOrg
+
 	l.backup()
 
 	if !l.hasPrefix(delimOrg) {
-		// TODO(bep) consider error
 		return lexMainSection
 	}
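
The lexer side of the change trades two package-level dividers (and two token types) for a single per-lexer summaryDivider, chosen by the intro state for the input format: lexIntroSection picks `<!--more-->`, the Org intro picks `# more`, and the main scan only ever searches for the one that applies. A minimal sketch of that idea outside Hugo's lexer:

    package main

    import (
    	"bytes"
    	"fmt"
    )

    var (
    	summaryDivider    = []byte("<!--more-->")
    	summaryDividerOrg = []byte("# more")
    )

    // pickDivider mirrors what the intro states do: choose the one divider
    // this input can contain, so the main scan has a single needle.
    func pickDivider(isOrg bool) []byte {
    	if isOrg {
    		return summaryDividerOrg
    	}
    	return summaryDivider
    }

    // splitAtDivider is a hypothetical helper, not Hugo's lexer: it scans
    // for the chosen divider once and splits the content around it.
    func splitAtDivider(content []byte, isOrg bool) (summary, rest []byte, found bool) {
    	d := pickDivider(isOrg)
    	i := bytes.Index(content, d)
    	if i == -1 {
    		return content, nil, false
    	}
    	return content[:i], content[i+len(d):], true
    }

    func main() {
    	s, r, ok := splitAtDivider([]byte("Intro.\n# more\nBody."), true)
    	fmt.Printf("%q %q %v\n", s, r, ok) // "Intro.\n" "\nBody." true
    }
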
 
--- a/parser/pageparser/pageparser.go
+++ b/parser/pageparser/pageparser.go
@@ -48,7 +48,7 @@
 }
 
 func parseMainSection(input []byte, from int) Result {
-	lexer := newPageLexer(input, from, lexMainSection) // TODO(bep) 2errors
+	lexer := newPageLexer(input, from, lexMainSection)
 	lexer.run()
 	return lexer
 }
--- a/parser/pageparser/pageparser_intro_test.go
+++ b/parser/pageparser/pageparser_intro_test.go
@@ -38,7 +38,7 @@
 	tstFrontMatterJSON     = nti(TypeFrontMatterJSON, tstJSON+"\r\n")
 	tstSomeText            = nti(tText, "\nSome text.\n")
 	tstSummaryDivider      = nti(TypeLeadSummaryDivider, "<!--more-->")
-	tstSummaryDividerOrg   = nti(TypeSummaryDividerOrg, "# more")
+	tstHtmlStart           = nti(TypeHTMLStart, "<")
 
 	tstORG = `
 #+TITLE: T1
@@ -54,8 +54,8 @@
 var frontMatterTests = []lexerTest{
 	{"empty", "", []Item{tstEOF}},
 	{"Byte order mark", "\ufeff\nSome text.\n", []Item{nti(TypeIgnore, "\ufeff"), tstSomeText, tstEOF}},
-	{"HTML Document", `  <html>  `, []Item{nti(TypeHTMLDocument, "  <html>  "), tstEOF}},
-	{"HTML Document 2", `<html><h1>Hugo Rocks</h1></html>`, []Item{nti(TypeHTMLDocument, "<html><h1>Hugo Rocks</h1></html>"), tstEOF}},
+	{"HTML Document", `  <html>  `, []Item{nti(tText, "  "), tstHtmlStart, nti(tText, "html>  "), tstEOF}},
+	{"HTML Document with shortcode", `<html>{{< sc1 >}}</html>`, []Item{tstHtmlStart, nti(tText, "html>"), tstLeftNoMD, tstSC1, tstRightNoMD, nti(tText, "</html>"), tstEOF}},
 	{"No front matter", "\nSome text.\n", []Item{tstSomeText, tstEOF}},
 	{"YAML front matter", "---\nfoo: \"bar\"\n---\n\nSome text.\n", []Item{tstFrontMatterYAML, tstSomeText, tstEOF}},
 	{"YAML empty front matter", "---\n---\n\nSome text.\n", []Item{nti(TypeFrontMatterYAML, ""), tstSomeText, tstEOF}},
@@ -65,7 +65,7 @@
 	{"TOML front matter", "+++\nfoo = \"bar\"\n+++\n\nSome text.\n", []Item{tstFrontMatterTOML, tstSomeText, tstEOF}},
 	{"JSON front matter", tstJSON + "\r\n\nSome text.\n", []Item{tstFrontMatterJSON, tstSomeText, tstEOF}},
 	{"ORG front matter", tstORG + "\nSome text.\n", []Item{tstFrontMatterORG, tstSomeText, tstEOF}},
-	{"Summary divider ORG", tstORG + "\nSome text.\n# more\nSome text.\n", []Item{tstFrontMatterORG, tstSomeText, tstSummaryDividerOrg, tstSomeText, tstEOF}},
+	{"Summary divider ORG", tstORG + "\nSome text.\n# more\nSome text.\n", []Item{tstFrontMatterORG, tstSomeText, nti(TypeLeadSummaryDivider, "# more"), tstSomeText, tstEOF}},
 	{"Summary divider", "+++\nfoo = \"bar\"\n+++\n\nSome text.\n<!--more-->\nSome text.\n", []Item{tstFrontMatterTOML, tstSomeText, tstSummaryDivider, tstSomeText, tstEOF}},
 }
 
--- a/tpl/data/data.go
+++ b/tpl/data/data.go
@@ -120,7 +120,6 @@
 
 	if err != nil {
 		return nil, _errors.Wrapf(err, "failed to get getJSON resource %q", url)
-		return nil, nil
 	}
 	return
 }
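
The data.go hunk deletes a `return nil, nil` that sat directly after an unconditional return and could never execute. The surviving wrap-and-return pattern, sketched with github.com/pkg/errors (imported as _errors, as in Hugo) and a hypothetical doRequest helper:

    package main

    import (
    	"fmt"

    	_errors "github.com/pkg/errors"
    )

    // doRequest is a hypothetical stand-in for the real HTTP fetch.
    func doRequest(url string) ([]byte, error) {
    	return nil, fmt.Errorf("connection refused")
    }

    func fetch(url string) ([]byte, error) {
    	b, err := doRequest(url)
    	if err != nil {
    		// Wrap with context and return; anything after this return
    		// is unreachable, which is why the stray second return was
    		// dropped above.
    		return nil, _errors.Wrapf(err, "failed to get getJSON resource %q", url)
    	}
    	return b, nil
    }

    func main() {
    	_, err := fetch("https://example.org/data.json")
    	fmt.Println(err)
    	// failed to get getJSON resource "https://example.org/data.json": connection refused
    }
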
--- a/tpl/template.go
+++ b/tpl/template.go
@@ -179,7 +179,7 @@
 		}
 		return false
 	}
-	// TODO(bep) 2errors text vs HTML
+
 	fe, ok := herrors.WithFileContext(ferr, realFilename, f, lineMatcher)
 	if ok || !hasMaster {
 		return fe