fix(files): upload should work with vhost style (#1994)
resolves https://github.com/go-vikunja/vikunja/issues/1905
@@ -168,6 +168,7 @@ const (
 	FilesS3AccessKey    Key = `files.s3.accesskey`
 	FilesS3SecretKey    Key = `files.s3.secretkey`
 	FilesS3UsePathStyle Key = `files.s3.usepathstyle`
+	FilesS3TempDir      Key = `files.s3.tempdir`
 
 	MigrationTodoistEnable   Key = `migration.todoist.enable`
 	MigrationTodoistClientID Key = `migration.todoist.clientid`
@@ -444,6 +445,7 @@ func InitDefaultConfig() {
 	FilesS3AccessKey.setDefault("")
 	FilesS3SecretKey.setDefault("")
 	FilesS3UsePathStyle.setDefault(false)
+	FilesS3TempDir.setDefault("")
 	// Cors
 	CorsEnable.setDefault(true)
 	CorsOrigins.setDefault([]string{"http://127.0.0.1:*", "http://localhost:*"})
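
For context on how the new key is meant to be used, here is a minimal sketch, not part of the commit, of setting files.s3.tempdir and reading it back. The VIKUNJA_FILES_S3_TEMPDIR environment variable name is an assumption based on Vikunja's usual mapping of config keys to env vars (VIKUNJA_ prefix, dots replaced by underscores, upper-cased):

	// Sketch only; the env var name is an assumed mapping of files.s3.tempdir.
	os.Setenv("VIKUNJA_FILES_S3_TEMPDIR", "/var/tmp/vikunja")

	// createS3TempFile (added later in this diff) reads the key back; an empty
	// value makes os.CreateTemp fall back to the system temp directory.
	dir := config.FilesS3TempDir.GetString()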
@@ -32,7 +32,8 @@ import (
 	"github.com/aws/aws-sdk-go/aws"             //nolint:staticcheck // afero-s3 still requires aws-sdk-go v1
 	"github.com/aws/aws-sdk-go/aws/credentials" //nolint:staticcheck // afero-s3 still requires aws-sdk-go v1
 	"github.com/aws/aws-sdk-go/aws/session"     //nolint:staticcheck // afero-s3 still requires aws-sdk-go v1
-	s3 "github.com/fclairamb/afero-s3"
+	"github.com/aws/aws-sdk-go/service/s3" //nolint:staticcheck // afero-s3 still requires aws-sdk-go v1
+	aferos3 "github.com/fclairamb/afero-s3"
 	"github.com/spf13/afero"
 	"github.com/stretchr/testify/require"
 )
@@ -41,6 +42,14 @@ import (
 var fs afero.Fs
 var afs *afero.Afero
 
+// S3 client and bucket for direct uploads with Content-Length
+type s3PutObjectClient interface {
+	PutObject(input *s3.PutObjectInput) (*s3.PutObjectOutput, error)
+}
+
+var s3Client s3PutObjectClient
+var s3Bucket string
+
 func setDefaultLocalConfig() {
 	if !strings.HasPrefix(config.FilesBasePath.GetString(), "/") {
 		config.FilesBasePath.Set(filepath.Join(
@@ -84,9 +93,13 @@ func initS3FileHandler() error {
 	}
 
 	// Initialize S3 filesystem using afero-s3
-	fs = s3.NewFs(bucket, sess)
+	fs = aferos3.NewFs(bucket, sess)
 	afs = &afero.Afero{Fs: fs}
 
+	// Store S3 client and bucket for direct uploads with Content-Length
+	s3Client = s3.New(sess)
+	s3Bucket = bucket
+
 	return nil
 }
 
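Since the commit title concerns vhost-style addressing, it may help to recall how aws-sdk-go v1 switches between the two URL schemes. A minimal sketch, assuming the session above is built roughly like this; the endpoint and credential variables are illustrative, and only FilesS3UsePathStyle appears in this diff:

	// Sketch, assuming aws-sdk-go v1: S3ForcePathStyle selects the URL scheme.
	// false (the new default above) => virtual-hosted style: https://bucket.endpoint/key
	// true                          => path style:           https://endpoint/bucket/key
	sess, err := session.NewSession(&aws.Config{
		Endpoint:         aws.String(endpoint), // illustrative; not a key from this diff
		Credentials:      credentials.NewStaticCredentials(accessKey, secretKey, ""),
		S3ForcePathStyle: aws.Bool(config.FilesS3UsePathStyle.GetBool()),
	})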
@@ -18,7 +18,9 @@ package files
 
 import (
 	"errors"
 	"fmt"
 	"io"
+	"math"
+	"os"
 	"path/filepath"
 	"strconv"
@@ -31,6 +33,8 @@ import (
 	"code.vikunja.io/api/pkg/modules/keyvalue"
 
 	"code.vikunja.io/api/pkg/web"
+	"github.com/aws/aws-sdk-go/aws"        //nolint:staticcheck // afero-s3 still requires aws-sdk-go v1
+	"github.com/aws/aws-sdk-go/service/s3" //nolint:staticcheck // afero-s3 still requires aws-sdk-go v1
 	"github.com/c2h5oh/datasize"
 	"github.com/spf13/afero"
 	"xorm.io/xorm"
@@ -150,10 +154,134 @@ func (f *File) Delete(s *xorm.Session) (err error) {
 
 // Save saves a file to storage
 func (f *File) Save(fcontent io.Reader) (err error) {
-	err = afs.WriteReader(f.getAbsoluteFilePath(), fcontent)
-	if err != nil {
-		return
+	if s3Client == nil {
+		err = afs.WriteReader(f.getAbsoluteFilePath(), fcontent)
+		if err != nil {
+			return fmt.Errorf("failed to save file: %w", err)
+		}
+
+		return keyvalue.IncrBy(metrics.FilesCountKey, 1)
 	}
 
+	// For S3 storage, use PutObject directly with Content-Length to enable streaming
+	// without buffering the entire file in memory. Some S3-compatible services
+	// (like MinIO) require Content-Length to be set explicitly.
+	body, contentLength, cleanup, err := prepareS3UploadBody(fcontent, f.Size)
+	if err != nil {
+		return err
+	}
+	if cleanup != nil {
+		defer cleanup()
+	}
+
+	_, err = s3Client.PutObject(&s3.PutObjectInput{
+		Bucket:        aws.String(s3Bucket),
+		Key:           aws.String(f.getAbsoluteFilePath()),
+		Body:          body,
+		ContentLength: aws.Int64(contentLength),
+	})
+	if err != nil {
+		return fmt.Errorf("failed to upload file to S3: %w", err)
+	}
+
 	return keyvalue.IncrBy(metrics.FilesCountKey, 1)
 }
+
+func prepareS3UploadBody(fcontent io.Reader, expectedSize uint64) (body io.ReadSeeker, contentLength int64, cleanup func(), err error) {
+	if seeker, ok := fcontent.(io.ReadSeeker); ok {
+		contentLength, err = contentLengthFromReadSeeker(seeker, expectedSize)
+		if err != nil {
+			return nil, 0, nil, fmt.Errorf("failed to determine S3 upload content length: %w", err)
+		}
+
+		_, err = seeker.Seek(0, io.SeekStart)
+		if err != nil {
+			return nil, 0, nil, fmt.Errorf("failed to seek S3 upload body to start: %w", err)
+		}
+
+		return seeker, contentLength, nil, nil
+	}
+
+	tempFile, tempPath, err := createS3TempFile()
+	if err != nil {
+		return nil, 0, nil, fmt.Errorf("failed to create temp file for S3 upload: %w", err)
+	}
+
+	cleanup = func() {
+		_ = tempFile.Close()
+		_ = os.Remove(tempPath)
+	}
+
+	written, err := io.Copy(tempFile, fcontent)
+	if err != nil {
+		cleanup()
+		return nil, 0, nil, fmt.Errorf("failed to buffer S3 upload to temp file: %w", err)
+	}
+
+	if expectedSize > 0 {
+		if expectedSize > uint64(math.MaxInt64) {
+			log.Warningf("File size mismatch for S3 upload: expected size %d bytes does not fit into int64", expectedSize)
+		} else if written != int64(expectedSize) {
+			log.Warningf("File size mismatch for S3 upload: expected %d bytes but buffered %d bytes", expectedSize, written)
+		}
+	}
+
+	_, err = tempFile.Seek(0, io.SeekStart)
+	if err != nil {
+		cleanup()
+		return nil, 0, nil, fmt.Errorf("failed to seek temp file for S3 upload: %w", err)
+	}
+
+	return tempFile, written, cleanup, nil
+}
+
+func contentLengthFromReadSeeker(seeker io.ReadSeeker, expectedSize uint64) (int64, error) {
+	currentOffset, err := seeker.Seek(0, io.SeekCurrent)
+	if err != nil {
+		return 0, err
+	}
+
+	endOffset, err := seeker.Seek(0, io.SeekEnd)
+	if err != nil {
+		return 0, err
+	}
+
+	_, err = seeker.Seek(currentOffset, io.SeekStart)
+	if err != nil {
+		return 0, err
+	}
+
+	if expectedSize > 0 && expectedSize <= uint64(math.MaxInt64) && endOffset != int64(expectedSize) {
+		log.Warningf("File size mismatch for S3 upload: expected %d bytes but reader reports %d bytes", expectedSize, endOffset)
+	}
+
+	return endOffset, nil
+}
+
+func createS3TempFile() (file *os.File, path string, err error) {
+	dir := config.FilesS3TempDir.GetString()
+
+	tryCreate := func(tempDir string) (*os.File, error) {
+		return os.CreateTemp(tempDir, "vikunja-s3-upload-*")
+	}
+
+	if dir != "" {
+		file, err = tryCreate(dir)
+		if err == nil {
+			return file, file.Name(), nil
+		}
+	}
+
+	file, err = tryCreate("")
+	if err == nil {
+		return file, file.Name(), nil
+	}
+
+	file, err = tryCreate(".")
+	if err != nil {
+		return nil, "", err
+	}
+
+	return file, file.Name(), nil
+}
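To make the branching in Save concrete, here is an illustrative caller, not part of the commit, showing which path each reader type takes:

	// Sketch only; demoSavePaths is a hypothetical helper, not from this diff.
	func demoSavePaths(f *File) error {
		// *bytes.Reader implements io.ReadSeeker, so prepareS3UploadBody measures
		// its length with Seek and streams it straight to PutObject: no temp file.
		if err := f.Save(bytes.NewReader([]byte("data"))); err != nil {
			return err
		}

		// io.NopCloser hides the Seek method, so this upload is first buffered to
		// a temp file (in files.s3.tempdir when set) to learn its Content-Length.
		return f.Save(io.NopCloser(strings.NewReader("data")))
	}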
@@ -18,12 +18,14 @@ package files
 
 import (
 	"bytes"
+	"errors"
 	"io"
+	"os"
 	"testing"
 
 	"code.vikunja.io/api/pkg/config"
 	"code.vikunja.io/api/pkg/db"
 	"github.com/aws/aws-sdk-go/service/s3" //nolint:staticcheck // afero-s3 still requires aws-sdk-go v1
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -295,3 +297,153 @@ func TestInitFileHandler_LocalFilesystem(t *testing.T) {
 	// Verify that afs is initialized
 	assert.NotNil(t, afs)
 }
+
+type fakeS3PutObjectClient struct {
+	lastInput *s3.PutObjectInput
+	err       error
+}
+
+func (f *fakeS3PutObjectClient) PutObject(input *s3.PutObjectInput) (*s3.PutObjectOutput, error) {
+	f.lastInput = input
+	if f.err != nil {
+		return nil, f.err
+	}
+	return &s3.PutObjectOutput{}, nil
+}
+
+type readerOnly struct {
+	r io.Reader
+}
+
+func (r *readerOnly) Read(p []byte) (int, error) {
+	return r.r.Read(p)
+}
+
+func TestFileSave_S3_UsesSeekableReaderWithoutTempFile(t *testing.T) {
+	originalClient := s3Client
+	originalBucket := s3Bucket
+	originalTempDir := config.FilesS3TempDir.GetString()
+	t.Cleanup(func() {
+		s3Client = originalClient
+		s3Bucket = originalBucket
+		config.FilesS3TempDir.Set(originalTempDir)
+	})
+
+	tempDir := t.TempDir()
+	config.FilesS3TempDir.Set(tempDir)
+
+	client := &fakeS3PutObjectClient{}
+	s3Client = client
+	s3Bucket = "test-bucket"
+
+	content := []byte("seekable-content")
+	file := &File{ID: 123, Size: uint64(len(content))}
+
+	err := file.Save(bytes.NewReader(content))
+	require.NoError(t, err)
+
+	require.NotNil(t, client.lastInput)
+	assert.Equal(t, "test-bucket", *client.lastInput.Bucket)
+	assert.Equal(t, file.getAbsoluteFilePath(), *client.lastInput.Key)
+	require.NotNil(t, client.lastInput.ContentLength)
+	assert.Equal(t, int64(len(content)), *client.lastInput.ContentLength)
+	assert.IsType(t, &bytes.Reader{}, client.lastInput.Body)
+
+	entries, err := os.ReadDir(tempDir)
+	require.NoError(t, err)
+	assert.Empty(t, entries)
+}
+
+func TestFileSave_S3_BuffersNonSeekableReaderAndCleansUpTempFile(t *testing.T) {
+	originalClient := s3Client
+	originalBucket := s3Bucket
+	originalTempDir := config.FilesS3TempDir.GetString()
+	t.Cleanup(func() {
+		s3Client = originalClient
+		s3Bucket = originalBucket
+		config.FilesS3TempDir.Set(originalTempDir)
+	})
+
+	tempDir := t.TempDir()
+	config.FilesS3TempDir.Set(tempDir)
+
+	client := &fakeS3PutObjectClient{}
+	s3Client = client
+	s3Bucket = "test-bucket"
+
+	content := []byte("non-seekable-content")
+	file := &File{ID: 456, Size: 0}
+
+	err := file.Save(&readerOnly{r: bytes.NewReader(content)})
+	require.NoError(t, err)
+
+	require.NotNil(t, client.lastInput)
+	require.NotNil(t, client.lastInput.ContentLength)
+	assert.Equal(t, int64(len(content)), *client.lastInput.ContentLength)
+	assert.IsType(t, &os.File{}, client.lastInput.Body)
+
+	entries, err := os.ReadDir(tempDir)
+	require.NoError(t, err)
+	assert.Empty(t, entries)
+}
+
+func TestFileSave_S3_CleansUpTempFileOnPutObjectError(t *testing.T) {
+	originalClient := s3Client
+	originalBucket := s3Bucket
+	originalTempDir := config.FilesS3TempDir.GetString()
+	t.Cleanup(func() {
+		s3Client = originalClient
+		s3Bucket = originalBucket
+		config.FilesS3TempDir.Set(originalTempDir)
+	})
+
+	tempDir := t.TempDir()
+	config.FilesS3TempDir.Set(tempDir)
+
+	client := &fakeS3PutObjectClient{err: errors.New("boom")}
+	s3Client = client
+	s3Bucket = "test-bucket"
+
+	content := []byte("non-seekable-content")
+	file := &File{ID: 789, Size: 0}
+
+	err := file.Save(&readerOnly{r: bytes.NewReader(content)})
+	require.Error(t, err)
+	assert.Contains(t, err.Error(), "failed to upload file to S3")
+
+	entries, readErr := os.ReadDir(tempDir)
+	require.NoError(t, readErr)
+	assert.Empty(t, entries)
+}
+
+func TestFileSave_S3_UsesBufferedSizeWhenExpectedSizeMismatch(t *testing.T) {
+	originalClient := s3Client
+	originalBucket := s3Bucket
+	originalTempDir := config.FilesS3TempDir.GetString()
+	t.Cleanup(func() {
+		s3Client = originalClient
+		s3Bucket = originalBucket
+		config.FilesS3TempDir.Set(originalTempDir)
+	})
+
+	tempDir := t.TempDir()
+	config.FilesS3TempDir.Set(tempDir)
+
+	client := &fakeS3PutObjectClient{}
+	s3Client = client
+	s3Bucket = "test-bucket"
+
+	content := []byte("mismatch-content")
+	file := &File{ID: 999, Size: uint64(len(content) + 10)}
+
+	err := file.Save(&readerOnly{r: bytes.NewReader(content)})
+	require.NoError(t, err)
+
+	require.NotNil(t, client.lastInput)
+	require.NotNil(t, client.lastInput.ContentLength)
+	assert.Equal(t, int64(len(content)), *client.lastInput.ContentLength)
+
+	entries, readErr := os.ReadDir(tempDir)
+	require.NoError(t, readErr)
+	assert.Empty(t, entries)
+}
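These tests run without any network because Save depends on the one-method s3PutObjectClient seam rather than the concrete *s3.S3 client, so any struct with a matching PutObject can stand in. A compile-time assertion, not in the commit, that would document this relationship:

	// Not in the commit: compile-time proof that the fake satisfies the seam
	// interface Save depends on.
	var _ s3PutObjectClient = (*fakeS3PutObjectClient)(nil)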