shithub: hugo

ref: 527cf1ab03fe4746885e90a197bc25decad88f89
parent: f4956d9aae69b1cb5715114cf5242fd80a9cabc7
author: Robert van Gent <[email protected]>
date: Fri May 3 05:30:46 EDT 2019

deploy: Support configuration of upload order
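
With this change, a site's deployment configuration can list regular expressions that control upload order: files matching the first pattern are uploaded first, then files matching the second, and so on, with everything else uploaded last. A minimal TOML sketch, based on the test fixture added in this patch (the patterns shown are illustrative):

	[deployment]
	# Upload image files before everything else.
	order = [".jpg$", ".gif$"]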

--- a/deploy/deploy.go
+++ b/deploy/deploy.go
@@ -23,7 +23,9 @@
 	"mime"
 	"os"
 	"path/filepath"
+	"regexp"
 	"runtime"
+	"sort"
 	"strings"
 	"sync"
 
@@ -45,14 +47,15 @@
 type Deployer struct {
 	localFs afero.Fs
 
-	target        *target    // the target to deploy to
-	matchers      []*matcher // matchers to apply to uploaded files
-	quiet         bool       // true reduces STDOUT
-	confirm       bool       // true enables confirmation before making changes
-	dryRun        bool       // true skips confirmations and prints changes instead of applying them
-	force         bool       // true forces upload of all files
-	invalidateCDN bool       // true enables invalidate CDN cache (if possible)
-	maxDeletes    int        // caps the # of files to delete; -1 to disable
+	target        *target          // the target to deploy to
+	matchers      []*matcher       // matchers to apply to uploaded files
+	ordering      []*regexp.Regexp // orders uploads
+	quiet         bool             // true reduces STDOUT
+	confirm       bool             // true enables confirmation before making changes
+	dryRun        bool             // true skips confirmations and prints changes instead of applying them
+	force         bool             // true forces upload of all files
+	invalidateCDN bool             // true enables invalidate CDN cache (if possible)
+	maxDeletes    int              // caps the # of files to delete; -1 to disable
 }
 
 // New constructs a new *Deployer.
@@ -79,6 +82,7 @@
 		localFs:       localFs,
 		target:        tgt,
 		matchers:      dcfg.Matchers,
+		ordering:      dcfg.ordering,
 		quiet:         cfg.GetBool("quiet"),
 		confirm:       cfg.GetBool("confirm"),
 		dryRun:        cfg.GetBool("dryRun"),
@@ -138,40 +142,55 @@
 		}
 	}
 
+	// Order the uploads. They are organized in groups; all uploads in a group
+	// must be complete before moving on to the next group.
+	uploadGroups := applyOrdering(d.ordering, uploads)
+
 	// Apply the changes in parallel, using an inverted worker
 	// pool (https://www.youtube.com/watch?v=5zXAHh5tJqQ&t=26m58s).
 	// sem prevents more than nParallel concurrent goroutines.
 	const nParallel = 10
-	sem := make(chan struct{}, nParallel)
 	var errs []error
 	var errMu sync.Mutex // protects errs
 
-	for _, upload := range uploads {
-		if d.dryRun {
-			if !d.quiet {
-				jww.FEEDBACK.Printf("[DRY RUN] Would upload: %v\n", upload)
-			}
+	for _, uploads := range uploadGroups {
+		// Short-circuit for an empty group.
+		if len(uploads) == 0 {
 			continue
 		}
 
-		// TODO: Add a progress indicator, as this can take a while
-		// depending on the number of files, upload speed, and size of the
-		// site.
-
-		sem <- struct{}{}
-		go func(upload *fileToUpload) {
-			if err := doSingleUpload(ctx, bucket, upload); err != nil {
-				errMu.Lock()
-				defer errMu.Unlock()
-				errs = append(errs, err)
+		// Within the group, apply uploads in parallel.
+		sem := make(chan struct{}, nParallel)
+		for _, upload := range uploads {
+			if d.dryRun {
+				if !d.quiet {
+					jww.FEEDBACK.Printf("[DRY RUN] Would upload: %v\n", upload)
+				}
+				continue
 			}
-			<-sem
-		}(upload)
+
+			sem <- struct{}{}
+			go func(upload *fileToUpload) {
+				if err := doSingleUpload(ctx, bucket, upload); err != nil {
+					errMu.Lock()
+					defer errMu.Unlock()
+					errs = append(errs, err)
+				}
+				<-sem
+			}(upload)
+		}
+		// Wait for all uploads in the group to finish.
+		for n := nParallel; n > 0; n-- {
+			sem <- struct{}{}
+		}
 	}
 
 	if d.maxDeletes != -1 && len(deletes) > d.maxDeletes {
 		jww.WARN.Printf("Skipping %d deletes because it is more than --maxDeletes (%d). If this is expected, set --maxDeletes to a larger number, or -1 to disable this check.\n", len(deletes), d.maxDeletes)
 	} else {
+		// Apply deletes in parallel.
+		sort.Slice(deletes, func(i, j int) bool { return deletes[i] < deletes[j] })
+		sem := make(chan struct{}, nParallel)
 		for _, del := range deletes {
 			if d.dryRun {
 				if !d.quiet {
@@ -190,11 +209,11 @@
 				<-sem
 			}(del)
 		}
+		// Wait for all deletes to finish.
+		for n := nParallel; n > 0; n-- {
+			sem <- struct{}{}
+		}
 	}
-	// Wait for all uploads/deletes to finish.
-	for n := nParallel; n > 0; n-- {
-		sem <- struct{}{}
-	}
 	if len(errs) > 0 {
 		if !d.quiet {
 			jww.FEEDBACK.Printf("Encountered %d errors.\n", len(errs))
@@ -550,4 +569,37 @@
 		}
 	}
 	return uploads, deletes
+}
+
+// applyOrdering returns an ordered slice of slices of uploads.
+//
+// The returned slice will have length len(ordering)+1.
+//
+// The subslice at index i, for i = 0 ... len(ordering)-1, will have all of the
+// uploads whose Local.Path matched the regex at ordering[i] (but not any
+// previous ordering regex).
+// The subslice at index len(ordering) will have the remaining uploads that
+// didn't match any ordering regex.
+//
+// The subslices are sorted by Local.Path.
+func applyOrdering(ordering []*regexp.Regexp, uploads []*fileToUpload) [][]*fileToUpload {
+
+	// Sort the whole slice by Local.Path first.
+	sort.Slice(uploads, func(i, j int) bool { return uploads[i].Local.Path < uploads[j].Local.Path })
+
+	retval := make([][]*fileToUpload, len(ordering)+1)
+	for _, u := range uploads {
+		matched := false
+		for i, re := range ordering {
+			if re.MatchString(u.Local.Path) {
+				retval[i] = append(retval[i], u)
+				matched = true
+				break
+			}
+		}
+		if !matched {
+			retval[len(ordering)] = append(retval[len(ordering)], u)
+		}
+	}
+	return retval
 }
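
To make the new upload flow concrete, here is a small standalone Go sketch (not part of the patch; groupByOrder, nParallel, and the sample paths are illustrative names). It mirrors the idea of applyOrdering above and the semaphore pattern used in Deploy: paths are grouped by the first ordering regex they match, each group is processed with at most nParallel concurrent goroutines, and a group must drain completely before the next one starts.

	package main

	import (
		"fmt"
		"regexp"
		"sort"
	)

	// groupByOrder mirrors applyOrdering: paths matching ordering[i] (and no
	// earlier pattern) land in group i; everything else lands in a final group.
	func groupByOrder(ordering []*regexp.Regexp, paths []string) [][]string {
		sort.Strings(paths)
		groups := make([][]string, len(ordering)+1)
		for _, p := range paths {
			idx := len(ordering)
			for i, re := range ordering {
				if re.MatchString(p) {
					idx = i
					break
				}
			}
			groups[idx] = append(groups[idx], p)
		}
		return groups
	}

	func main() {
		ordering := []*regexp.Regexp{regexp.MustCompile(`\.jpg$`)}
		groups := groupByOrder(ordering, []string{"index.html", "a.jpg", "b.jpg"})

		const nParallel = 2
		for _, group := range groups {
			sem := make(chan struct{}, nParallel)
			for _, path := range group {
				sem <- struct{}{} // acquire a slot; blocks once nParallel are in flight
				go func(path string) {
					fmt.Println("upload:", path) // stand-in for doSingleUpload
					<-sem                        // release the slot
				}(path)
			}
			// Barrier: these sends only complete once every goroutine in the
			// group has released its slot, so the next group cannot start early.
			for n := nParallel; n > 0; n-- {
				sem <- struct{}{}
			}
		}
	}

Filling the semaphore to capacity works as a wait because each in-flight goroutine still holds one slot; the final sends cannot all complete until every slot has been released.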
--- a/deploy/deployConfig.go
+++ b/deploy/deployConfig.go
@@ -27,6 +27,9 @@
 type deployConfig struct {
 	Targets  []*target
 	Matchers []*matcher
+	Order    []string
+
+	ordering []*regexp.Regexp // compiled Order
 }
 
 type target struct {
@@ -85,6 +88,13 @@
 		if err != nil {
 			return dcfg, fmt.Errorf("invalid deployment.matchers.pattern: %v", err)
 		}
+	}
+	for _, o := range dcfg.Order {
+		re, err := regexp.Compile(o)
+		if err != nil {
+			return dcfg, fmt.Errorf("invalid deployment.orderings.pattern: %v", err)
+		}
+		dcfg.ordering = append(dcfg.ordering, re)
 	}
 	return dcfg, nil
 }
--- a/deploy/deployConfig_test.go
+++ b/deploy/deployConfig_test.go
@@ -29,6 +29,9 @@
 someOtherValue = "foo"
 
 [deployment]
+
+order = ["o1", "o2"]
+
 [[deployment.targets]]
 Name = "name1"
 URL = "url1"
@@ -59,6 +62,11 @@
 	dcfg, err := decodeConfig(cfg)
 	assert.NoError(err)
 
+	assert.Equal(2, len(dcfg.Order))
+	assert.Equal("o1", dcfg.Order[0])
+	assert.Equal("o2", dcfg.Order[1])
+	assert.Equal(2, len(dcfg.ordering))
+
 	assert.Equal(2, len(dcfg.Targets))
 	assert.Equal("name1", dcfg.Targets[0].Name)
 	assert.Equal("url1", dcfg.Targets[0].URL)
@@ -69,11 +77,36 @@
 
 	assert.Equal(2, len(dcfg.Matchers))
 	assert.Equal("^pattern1$", dcfg.Matchers[0].Pattern)
+	assert.NotNil(dcfg.Matchers[0].re)
 	assert.Equal("cachecontrol1", dcfg.Matchers[0].CacheControl)
 	assert.Equal("contentencoding1", dcfg.Matchers[0].ContentEncoding)
 	assert.Equal("contenttype1", dcfg.Matchers[0].ContentType)
 	assert.True(dcfg.Matchers[0].Gzip)
 	assert.True(dcfg.Matchers[0].Force)
+	assert.Equal("^pattern2$", dcfg.Matchers[1].Pattern)
+	assert.NotNil(dcfg.Matchers[1].re)
+	assert.Equal("cachecontrol2", dcfg.Matchers[1].CacheControl)
+	assert.Equal("contentencoding2", dcfg.Matchers[1].ContentEncoding)
+	assert.Equal("contenttype2", dcfg.Matchers[1].ContentType)
+	assert.False(dcfg.Matchers[1].Gzip)
+	assert.False(dcfg.Matchers[1].Force)
+}
+
+func TestInvalidOrderingPattern(t *testing.T) {
+	assert := require.New(t)
+
+	tomlConfig := `
+
+someOtherValue = "foo"
+
+[deployment]
+order = ["["]  # invalid regular expression
+`
+	cfg, err := config.FromConfigString(tomlConfig, "toml")
+	assert.NoError(err)
+
+	_, err = decodeConfig(cfg)
+	assert.Error(err)
 }
 
 func TestInvalidMatcherPattern(t *testing.T) {
--- a/deploy/deploy_test.go
+++ b/deploy/deploy_test.go
@@ -19,6 +19,7 @@
 	"crypto/md5"
 	"io/ioutil"
 	"os"
+	"regexp"
 	"sort"
 	"testing"
 
@@ -174,11 +175,10 @@
 				remote[r.Key] = r
 			}
 			gotUpdates, gotDeletes := findDiffs(local, remote, tc.Force)
-			sort.Slice(gotUpdates, func(i, j int) bool { return gotUpdates[i].Local.Path < gotUpdates[j].Local.Path })
+			gotUpdates = applyOrdering(nil, gotUpdates)[0]
 			sort.Slice(gotDeletes, func(i, j int) bool { return gotDeletes[i] < gotDeletes[j] })
 			if diff := cmp.Diff(gotUpdates, tc.WantUpdates, cmpopts.IgnoreUnexported(localFile{})); diff != "" {
 				t.Errorf("updates differ:\n%s", diff)
-
 			}
 			if diff := cmp.Diff(gotDeletes, tc.WantDeletes); diff != "" {
 				t.Errorf("deletes differ:\n%s", diff)
@@ -302,6 +302,61 @@
 			}
 			if !bytes.Equal(gotContent, tc.WantContent) {
 				t.Errorf("got content %q want %q", string(gotContent), string(tc.WantContent))
+			}
+		})
+	}
+}
+
+func TestOrdering(t *testing.T) {
+	tests := []struct {
+		Description string
+		Uploads     []string
+		Ordering    []*regexp.Regexp
+		Want        [][]string
+	}{
+		{
+			Description: "empty",
+			Want:        [][]string{nil},
+		},
+		{
+			Description: "no ordering",
+			Uploads:     []string{"c", "b", "a", "d"},
+			Want:        [][]string{{"a", "b", "c", "d"}},
+		},
+		{
+			Description: "one ordering",
+			Uploads:     []string{"db", "c", "b", "a", "da"},
+			Ordering:    []*regexp.Regexp{regexp.MustCompile("^d")},
+			Want:        [][]string{{"da", "db"}, {"a", "b", "c"}},
+		},
+		{
+			Description: "two orderings",
+			Uploads:     []string{"db", "c", "b", "a", "da"},
+			Ordering: []*regexp.Regexp{
+				regexp.MustCompile("^d"),
+				regexp.MustCompile("^b"),
+			},
+			Want: [][]string{{"da", "db"}, {"b"}, {"a", "c"}},
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.Description, func(t *testing.T) {
+			uploads := make([]*fileToUpload, len(tc.Uploads))
+			for i, u := range tc.Uploads {
+				uploads[i] = &fileToUpload{Local: &localFile{Path: u}}
+			}
+			gotUploads := applyOrdering(tc.Ordering, uploads)
+			var got [][]string
+			for _, subslice := range gotUploads {
+				var gotsubslice []string
+				for _, u := range subslice {
+					gotsubslice = append(gotsubslice, u.Local.Path)
+				}
+				got = append(got, gotsubslice)
+			}
+			if diff := cmp.Diff(got, tc.Want); diff != "" {
+				t.Error(diff)
 			}
 		})
 	}