diff --git a/cmd/serve/s3/s3_test.go b/cmd/serve/s3/s3_test.go
index 09e20f3f5bd39..f8827e99fdef3 100644
--- a/cmd/serve/s3/s3_test.go
+++ b/cmd/serve/s3/s3_test.go
@@ -8,10 +8,22 @@ import (
"context"
"encoding/hex"
"fmt"
+ "io"
+ "math/rand"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "os"
+ "os/exec"
+ "path"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
_ "github.com/rclone/rclone/backend/local"
- "github.com/rclone/rclone/backend/webdav"
"github.com/rclone/rclone/cmd/serve/servetest"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -23,17 +35,6 @@ import (
httplib "github.com/rclone/rclone/lib/http"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "io"
- "math/rand"
- "net/http"
- "net/http/httptest"
- "net/url"
- "os"
- "os/exec"
- "path"
- "strings"
- "testing"
- "time"
)
const (
@@ -129,6 +130,40 @@ const (
`
+	propfindEmptyBucketResponse = `
+<?xml version="1.0" encoding="utf-8"?>
+<d:multistatus xmlns:d="DAV:" xmlns:s="http://sabredav.org/ns" xmlns:oc="http://owncloud.org/ns">
+ <d:response>
+  <d:href>/bucket/</d:href>
+  <d:propstat>
+   <d:prop>
+    <d:getlastmodified>Wed, 28 Jun 2023 07:58:52 GMT</d:getlastmodified>
+    <d:resourcetype>
+     <d:collection/>
+    </d:resourcetype>
+    <d:quota-used-bytes>0</d:quota-used-bytes>
+    <d:quota-available-bytes>-3</d:quota-available-bytes>
+    <d:getetag>"649be83ccc047"</d:getetag>
+   </d:prop>
+   <d:status>HTTP/1.1 200 OK</d:status>
+  </d:propstat>
+ </d:response>
+</d:multistatus>`
+	propfindFileResponse = `
+<d:multistatus xmlns:d="DAV:" xmlns:s="http://sabredav.org/ns" xmlns:oc="http://owncloud.org/ns">
+ <d:response>
+  <d:href>/bucket/%s</d:href>
+  <d:propstat>
+   <d:prop>
+    <d:getcontentlength>%d</d:getcontentlength>
+    <d:resourcetype/>
+    <d:getetag>"a78a2b6eebbe67dda2c898c23c164eb6"</d:getetag>
+   </d:prop>
+   <d:status>HTTP/1.1 200 OK</d:status>
+  </d:propstat>
+ </d:response>
+</d:multistatus>
+`
)
// Configure and serve the server
@@ -138,7 +173,9 @@ func serveS3(f fs.Fs, keyid string, keysec string) (testURL string) {
pathBucketMode: true,
hashName: "",
hashType: hash.None,
- authPair: []string{fmt.Sprintf("%s,%s", keyid, keysec)},
+ }
+ if keyid != "" && keysec != "" {
+ serveropt.authPair = []string{fmt.Sprintf("%s,%s", keyid, keysec)}
}
serveropt.HTTP.ListenAddr = []string{endpoint}
@@ -170,7 +207,7 @@ func TestS3(t *testing.T) {
keysec := RandString(16)
testURL := serveS3(f, keyid, keysec)
// Config for the backend we'll use to connect to the server
- config := configmap.Simple{
+ s3ServerConfig := configmap.Simple{
"type": "s3",
"provider": "Rclone",
"endpoint": testURL,
@@ -179,7 +216,7 @@ func TestS3(t *testing.T) {
"secret_access_key": keysec,
}
- return config, func() {}
+ return s3ServerConfig, func() {}
}
RunS3UnitTests(t, "s3", start)
@@ -198,7 +235,7 @@ func RunS3UnitTests(t *testing.T, name string, start servetest.StartFn) {
assert.NoError(t, err)
f := fremote
- config, cleanup := start(f)
+ s3ServerConfig, cleanup := start(f)
defer cleanup()
// Change directory to run the tests
@@ -228,7 +265,7 @@ func RunS3UnitTests(t *testing.T, name string, start servetest.StartFn) {
// Configure the backend with environment variables
cmd.Env = os.Environ()
prefix := "RCLONE_CONFIG_" + strings.ToUpper(remoteName[:len(remoteName)-1]) + "_"
- for k, v := range config {
+ for k, v := range s3ServerConfig {
cmd.Env = append(cmd.Env, prefix+strings.ToUpper(k)+"="+v)
}
@@ -240,16 +277,8 @@ func RunS3UnitTests(t *testing.T, name string, start servetest.StartFn) {
assert.NoError(t, err, "Running "+name+" integration tests")
}
-func prepareWebDavServer(t *testing.T, keyid string) func() {
- // test the headers are there send a dummy response to About
- expectedAuthHeader := "Bearer " + keyid
- handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- //what := fmt.Sprintf("%s %s: Header ", r.Method, r.URL.Path)
- //assert.Equal(t, headers[1], r.Header.Get(headers[0]), what+headers[0])
- assert.Equal(t, expectedAuthHeader, r.Header.Get("Authorization"))
- fmt.Fprintf(w, propfindResponseRoot)
- })
-
+// prepare the test server and return a function to tidy it up afterwards
+func prepareWebDavFs(t *testing.T, handler http.HandlerFunc) (fs.Fs, func()) {
// Make the test server
ts := httptest.NewServer(handler)
@@ -260,25 +289,25 @@ func prepareWebDavServer(t *testing.T, keyid string) func() {
err := config.SetValueAndSave("webdavtest", "url", ts.URL)
assert.NoError(t, err)
- // return a function to tidy up
- return ts.Close
-}
-
-// prepare the test server and return a function to tidy it up afterwards
-func prepareWebDavFs(t *testing.T, keyid string) (fs.Fs, func()) {
- tidy := prepareWebDavServer(t, keyid)
-
// Instantiate the WebDAV server
- f, err := webdav.NewFs(context.Background(), "webdavtest", "", configmap.Simple{})
+ info, name, remote, webdavServerConfig, _ := fs.ConfigFs("webdavtest:")
+ f, err := info.NewFs(context.Background(), name, remote, webdavServerConfig)
+
require.NoError(t, err)
- return f, tidy
+ return f, ts.Close
}
func TestForwardAccessKeyToWebDav(t *testing.T) {
keyid := RandString(16)
keysec := RandString(16)
- f, clean := prepareWebDavFs(t, keyid)
+ expectedAuthHeader := "Bearer " + keyid
+ handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ assert.Equal(t, expectedAuthHeader, r.Header.Get("Authorization"))
+ _, err := fmt.Fprint(w, propfindResponseRoot)
+ assert.NoError(t, err)
+ })
+ f, clean := prepareWebDavFs(t, handler)
defer clean()
endpoint := serveS3(f, keyid, keysec)
testURL, _ := url.Parse(endpoint)
@@ -291,7 +320,90 @@ func TestForwardAccessKeyToWebDav(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, buckets[0].Name, "bucket")
assert.Equal(t, buckets[1].Name, "bucket2")
+}
+
+// When receiving multiple requests in parallel with different tokens we have to make sure the correct
+// Auth header is set for every request and there is no race condition where parallel requests overwrite
+// each other's headers.
+// To test that case we send multiple PutObject requests with an object name that matches the S3 key.
+// On the webdav side, we check that the object name is the same as the Auth Bearer token.
+func TestForwardAccessKeyToWebDavParallelRequests(t *testing.T) {
+	webdavHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		// the proxy will send different requests to check the path etc
+		if r.Method == "PROPFIND" {
+			if r.URL.Path == "/" {
+				_, err := fmt.Fprint(w, propfindResponseRoot)
+				assert.NoError(t, err)
+			} else if r.URL.Path == "/bucket/" {
+				_, err := fmt.Fprint(w, propfindEmptyBucketResponse)
+				assert.NoError(t, err)
+			} else {
+				// this is the main check, we want to make sure that the path belongs
+				// to the correct user
+				expectedKeyIdExtractedFromPath := path.Base(r.URL.Path)
+				assert.Equal(t, "Bearer "+expectedKeyIdExtractedFromPath, r.Header.Get("Authorization"))
+				_, err := fmt.Fprintf(w, propfindFileResponse, expectedKeyIdExtractedFromPath, 8)
+				assert.NoError(t, err)
+			}
+		} else if r.Method == "PUT" {
+			// this is the main check, we want to make sure that the path belongs
+			// to the correct user
+			expectedKeyIdExtractedFromPath := path.Base(r.URL.Path)
+			assert.Equal(t, "Bearer "+expectedKeyIdExtractedFromPath, r.Header.Get("Authorization"))
+		}
+	})
+	f, clean := prepareWebDavFs(t, webdavHandler)
+	defer clean()
+
+	keyids := []string{
+		"KeyOfUserAlice",
+		"KeyOfUserBob",
+		"KeyOfUserCarol",
+		"KeyOfUserDavid",
+	}
+	var wg sync.WaitGroup
+	wg.Add(len(keyids))
+
+	endpoint := serveS3(f, "", "")
+	testURL, _ := url.Parse(endpoint)
+
+	responseChannel := make(chan error)
+
+	for _, keyid := range keyids {
+		keyid := keyid
+		go func(responseChannel chan<- error) {
+			defer wg.Done()
+			minioClient, err := minio.New(testURL.Host, &minio.Options{
+				Creds:  credentials.NewStaticV4(keyid, "does-not-matter-will-be-ignored-by-server", ""),
+				Secure: false,
+			})
+			if err != nil {
+				responseChannel <- err
+				return
+			}
+			// The body content itself does not matter for this test; the
+			// webdav handler only asserts on the path and the Auth header.
+			buf := bytes.NewBufferString("contents")
+			_, err = minioClient.PutObject(
+				context.Background(), "bucket", keyid, buf, int64(buf.Len()),
+				minio.PutObjectOptions{},
+			)
+			if err != nil {
+				responseChannel <- err
+				return
+			}
+		}(responseChannel)
+	}
+
+	go func() {
+		wg.Wait()
+		close(responseChannel)
+	}()
+	for i := 0; i < len(keyids); i++ {
+		response := <-responseChannel
+		assert.NoError(t, response)
+	}
}
// tests using the minio client
@@ -353,11 +465,11 @@ func TestEncodingWithMinioClient(t *testing.T) {
buckets, err := minioClient.ListBuckets(context.Background())
assert.NoError(t, err)
assert.Equal(t, buckets[0].Name, tt.bucket)
- objects := minioClient.ListObjects(context.Background(), tt.bucket, minio.ListObjectsOptions{
+ s3objects := minioClient.ListObjects(context.Background(), tt.bucket, minio.ListObjectsOptions{
Recursive: true,
})
- for object := range objects {
- assert.Equal(t, path.Join(tt.path, tt.filename), object.Key)
+ for s3object := range s3objects {
+ assert.Equal(t, path.Join(tt.path, tt.filename), s3object.Key)
}
})
}