Skip to content
Open
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
41 changes: 32 additions & 9 deletions pkg/server/resource/aws-bucket-files.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ package resource
import (
"bytes"
"os"
"strings"
"sync"

"github.com/aws/aws-sdk-go-v2/aws"
Expand Down Expand Up @@ -118,6 +119,36 @@ func (r *BucketFiles) upload(client *s3.Client, bucketName string, files []Bucke
oldFilesMap[f.Key] = f
}

// Split files into HTML and non-HTML to upload non-HTML first.
// This avoids a race condition where CloudFront serves new HTML
// referencing assets that haven't been uploaded yet.
var htmlFiles, nonHtmlFiles []BucketFile
for _, file := range files {
oldFile, exists := oldFilesMap[file.Key]
if exists && oldFile.Hash != nil && *oldFile.Hash == *file.Hash &&
oldFile.CacheControl == file.CacheControl &&
oldFile.ContentType == file.ContentType {
continue
}
if strings.HasSuffix(file.Key, ".html") {
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Would it be better to just break the files into these two groups instead of HTML and non-HTML:

  • updatedFiles: files that have an existing key and a different hash <-- upload these last
  • createdFiles: files that do not have an existing key <-- upload these first

This should still solve the race issue, and it avoids having to bring in and handle file-extension-specific logic.

htmlFiles = append(htmlFiles, file)
} else {
nonHtmlFiles = append(nonHtmlFiles, file)
}
}

if err := r.uploadFiles(client, bucketName, nonHtmlFiles); err != nil {
return err
}

return r.uploadFiles(client, bucketName, htmlFiles)
}

func (r *BucketFiles) uploadFiles(client *s3.Client, bucketName string, files []BucketFile) error {
if len(files) == 0 {
return nil
}

// Create channels for work distribution and error collection
filesChan := make(chan BucketFile)
errChan := make(chan error, len(files))
Expand All @@ -131,7 +162,6 @@ func (r *BucketFiles) upload(client *s3.Client, bucketName string, files []Bucke
defer wg.Done()
// Each worker processes files from the channel
for file := range filesChan {
// write start timestamp with nanoseconds to file
content, err := os.ReadFile(file.Source)
if err != nil {
errChan <- err
Expand All @@ -152,15 +182,9 @@ func (r *BucketFiles) upload(client *s3.Client, bucketName string, files []Bucke
}()
}

// Send files that need uploading to the channel
// Send files to the channel
go func() {
for _, file := range files {
oldFile, exists := oldFilesMap[file.Key]
if exists && oldFile.Hash != nil && *oldFile.Hash == *file.Hash &&
oldFile.CacheControl == file.CacheControl &&
oldFile.ContentType == file.ContentType {
continue
}
filesChan <- file
}
close(filesChan)
Expand Down Expand Up @@ -200,4 +224,3 @@ func (r *BucketFiles) purge(client *s3.Client, bucketName string, files []Bucket

return nil
}