mirror of
https://github.com/go-vikunja/vikunja.git
synced 2026-04-28 10:28:33 -05:00
feat(files): validate file storage is writable on startup (#2053)
Adds startup validation that checks whether the configured file storage is writable. To do this, Vikunja now tries to create a temporary file on startup and cleans it up afterwards.
This commit is contained in:
@@ -17,12 +17,14 @@
|
||||
package files
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"code.vikunja.io/api/pkg/config"
|
||||
"code.vikunja.io/api/pkg/db"
|
||||
@@ -107,6 +109,7 @@ func initS3FileHandler() error {
|
||||
func initLocalFileHandler() {
|
||||
fs = afero.NewOsFs()
|
||||
afs = &afero.Afero{Fs: fs}
|
||||
s3Client = nil
|
||||
setDefaultLocalConfig()
|
||||
}
|
||||
|
||||
@@ -116,13 +119,20 @@ func InitFileHandler() error {
|
||||
|
||||
switch fileType {
|
||||
case "s3":
|
||||
return initS3FileHandler()
|
||||
if err := initS3FileHandler(); err != nil {
|
||||
return err
|
||||
}
|
||||
case "local":
|
||||
initLocalFileHandler()
|
||||
return nil
|
||||
default:
|
||||
return fmt.Errorf("invalid file storage type '%s': must be 'local' or 's3'", fileType)
|
||||
}
|
||||
|
||||
if err := ValidateFileStorage(); err != nil {
|
||||
return fmt.Errorf("storage validation failed: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// InitTestFileHandler initializes a new memory file system for testing
|
||||
@@ -175,3 +185,22 @@ func InitTests() {
|
||||
func FileStat(file *File) (os.FileInfo, error) {
|
||||
return afs.Stat(file.getAbsoluteFilePath())
|
||||
}
|
||||
|
||||
// ValidateFileStorage checks that the configured file storage is writable
|
||||
// by creating and removing a temporary file.
|
||||
func ValidateFileStorage() error {
|
||||
filename := fmt.Sprintf(".vikunja-check-%d", time.Now().UnixNano())
|
||||
path := filepath.Join(config.FilesBasePath.GetString(), filename)
|
||||
|
||||
err := writeToStorage(path, bytes.NewReader([]byte{}), 0)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create test file at %s: %w", path, err)
|
||||
}
|
||||
|
||||
err = afs.Remove(path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to remove test file at %s: %w", path, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -152,22 +152,13 @@ func (f *File) Delete(s *xorm.Session) (err error) {
|
||||
return keyvalue.DecrBy(metrics.FilesCountKey, 1)
|
||||
}
|
||||
|
||||
// Save saves a file to storage
|
||||
func (f *File) Save(fcontent io.Reader) (err error) {
|
||||
|
||||
// writeToStorage writes content to the given path, handling both local and S3 backends
|
||||
func writeToStorage(path string, content io.Reader, size uint64) error {
|
||||
if s3Client == nil {
|
||||
err = afs.WriteReader(f.getAbsoluteFilePath(), fcontent)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to save file: %w", err)
|
||||
}
|
||||
|
||||
return keyvalue.IncrBy(metrics.FilesCountKey, 1)
|
||||
return afs.WriteReader(path, content)
|
||||
}
|
||||
|
||||
// For S3 storage, use PutObject directly with Content-Length to enable streaming
|
||||
// without buffering the entire file in memory. Some S3-compatible services
|
||||
// (like MinIO) require Content-Length to be set explicitly.
|
||||
body, contentLength, cleanup, err := prepareS3UploadBody(fcontent, f.Size)
|
||||
body, contentLength, cleanup, err := prepareS3UploadBody(content, size)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -177,14 +168,22 @@ func (f *File) Save(fcontent io.Reader) (err error) {
|
||||
|
||||
_, err = s3Client.PutObject(&s3.PutObjectInput{
|
||||
Bucket: aws.String(s3Bucket),
|
||||
Key: aws.String(f.getAbsoluteFilePath()),
|
||||
Key: aws.String(path),
|
||||
Body: body,
|
||||
ContentLength: aws.Int64(contentLength),
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to upload file to S3: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Save saves a file to storage
|
||||
func (f *File) Save(fcontent io.Reader) error {
|
||||
err := writeToStorage(f.getAbsoluteFilePath(), fcontent, f.Size)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to save file: %w", err)
|
||||
}
|
||||
return keyvalue.IncrBy(metrics.FilesCountKey, 1)
|
||||
}
|
||||
|
||||
|
||||
@@ -220,9 +220,12 @@ func TestInitFileHandler_S3Configuration(t *testing.T) {
|
||||
config.FilesS3AccessKey.Set("test-access-key")
|
||||
config.FilesS3SecretKey.Set("test-secret-key")
|
||||
|
||||
// This should not return an error with valid configuration
|
||||
// With valid configuration, InitFileHandler will succeed at config parsing
|
||||
// but fail at storage validation (since the S3 endpoint isn't real).
|
||||
// The error should be from validation, not from config parsing.
|
||||
err := InitFileHandler()
|
||||
assert.NoError(t, err)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "storage validation failed")
|
||||
})
|
||||
|
||||
t.Run("missing S3 endpoint", func(t *testing.T) {
|
||||
@@ -281,14 +284,20 @@ func TestInitFileHandler_S3Configuration(t *testing.T) {
|
||||
func TestInitFileHandler_LocalFilesystem(t *testing.T) {
|
||||
// Save original config values
|
||||
originalType := config.FilesType.GetString()
|
||||
originalBasePath := config.FilesBasePath.GetString()
|
||||
|
||||
// Create a temp directory for the test
|
||||
tempDir := t.TempDir()
|
||||
|
||||
// Restore config after test
|
||||
defer func() {
|
||||
config.FilesType.Set(originalType)
|
||||
config.FilesBasePath.Set(originalBasePath)
|
||||
}()
|
||||
|
||||
// Test with local filesystem
|
||||
// Test with local filesystem using writable temp directory
|
||||
config.FilesType.Set("local")
|
||||
config.FilesBasePath.Set(tempDir)
|
||||
|
||||
// This should not return an error
|
||||
err := InitFileHandler()
|
||||
|
||||
Reference in New Issue
Block a user