serve s3: fix listing oddities

Before this change, listing a subdirectory gave errors like this:

    Entry doesn't belong in directory "" (contains subdir) - ignoring

It also did full recursive listings when it didn't need to.

This was caused by the code using the underlying Fs to do recursive
listings on bucket-based backends.

Using both the VFS and the underlying Fs is a mistake, so this patch
removes the code which uses the underlying Fs and uses the VFS only.

Fixes #7500
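
For orientation, here is a minimal, self-contained sketch of the idea behind the VFS-only path: split the listing prefix at the last "/" into a directory to list and a leaf-name prefix to match, then list just that one directory, reporting subdirectories as common prefixes. The toy tree and the names splitPrefix and listDir are hypothetical and only illustrate the technique; they are not rclone's implementation.

    package main

    import (
        "fmt"
        "path"
        "strings"
    )

    // Toy directory tree standing in for the VFS; names ending in "/" are
    // subdirectories. Purely illustrative data.
    var tree = map[string][]string{
        "bucket":        {"subdir/", "top.txt"},
        "bucket/subdir": {"a.txt", "b.txt"},
    }

    // splitPrefix splits an S3 listing prefix at the last "/" into the
    // directory to list and the leaf-name prefix to match within it.
    func splitPrefix(prefix string) (dir, name string) {
        i := strings.LastIndex(prefix, "/")
        if i < 0 {
            return "", prefix
        }
        return prefix[:i], prefix[i+1:]
    }

    // listDir lists a single directory only (no recursion), returning
    // matching objects and subdirectories as common prefixes.
    func listDir(bucket, prefix string) (objects, commonPrefixes []string) {
        dir, name := splitPrefix(prefix)
        for _, entry := range tree[path.Join(bucket, dir)] {
            if !strings.HasPrefix(entry, name) {
                continue
            }
            if strings.HasSuffix(entry, "/") {
                commonPrefixes = append(commonPrefixes, path.Join(dir, entry)+"/")
                continue
            }
            objects = append(objects, path.Join(dir, entry))
        }
        return objects, commonPrefixes
    }

    func main() {
        fmt.Println(listDir("bucket", "sub"))     // [] [subdir/]
        fmt.Println(listDir("bucket", "subdir/")) // [subdir/a.txt subdir/b.txt] []
    }
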
Nick Craig-Wood 2023-12-05 11:11:29 +00:00
parent bff56d0b24
commit e174c8f822
2 changed files with 5 additions and 47 deletions

cmd/serve/s3/backend.go

@@ -77,13 +77,9 @@ func (b *s3Backend) ListBucket(bucket string, prefix *gofakes3.Prefix, page gofa
 	}
 	response := gofakes3.NewObjectList()
-	if b.vfs.Fs().Features().BucketBased || prefix.HasDelimiter && prefix.Delimiter != "/" {
-		err = b.getObjectsListArbitrary(bucket, prefix, response)
-	} else {
-		path, remaining := prefixParser(prefix)
-		err = b.entryListR(bucket, path, remaining, prefix.HasDelimiter, response)
-	}
+	path, remaining := prefixParser(prefix)
+	err = b.entryListR(bucket, path, remaining, prefix.HasDelimiter, response)
 	if err == gofakes3.ErrNoSuchKey {
 		// AWS just returns an empty list
 		response = gofakes3.NewObjectList()
@@ -366,9 +362,7 @@ func (b *s3Backend) deleteObject(bucketName, objectName string) error {
 	}
 	// FIXME: unsafe operation
-	if b.vfs.Fs().Features().CanHaveEmptyDirectories {
-		rmdirRecursive(fp, b.vfs)
-	}
+	rmdirRecursive(fp, b.vfs)
 	return nil
 }

cmd/serve/s3/list.go

@@ -1,16 +1,13 @@
 package s3
 import (
-	"context"
 	"path"
 	"strings"
 	"github.com/Mikubill/gofakes3"
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/walk"
 )
-func (b *s3Backend) entryListR(bucket, fdPath, name string, acceptComPrefix bool, response *gofakes3.ObjectList) error {
+func (b *s3Backend) entryListR(bucket, fdPath, name string, addPrefix bool, response *gofakes3.ObjectList) error {
 	fp := path.Join(bucket, fdPath)
 	dirEntries, err := getDirEntries(fp, b.vfs)
@@ -29,7 +26,7 @@ func (b *s3Backend) entryListR(bucket, fdPath, name string, acceptComPrefix bool
 		}
 		if entry.IsDir() {
-			if acceptComPrefix {
+			if addPrefix {
 				response.AddPrefix(gofakes3.URLEncode(objectPath))
 				continue
 			}
@@ -50,36 +47,3 @@ func (b *s3Backend) entryListR(bucket, fdPath, name string, acceptComPrefix bool
 	}
 	return nil
 }
-// getObjectsList lists the objects in the given bucket.
-func (b *s3Backend) getObjectsListArbitrary(bucket string, prefix *gofakes3.Prefix, response *gofakes3.ObjectList) error {
-	// ignore error - vfs may have uncommitted updates, such as new dir etc.
-	_ = walk.ListR(context.Background(), b.vfs.Fs(), bucket, false, -1, walk.ListObjects, func(entries fs.DirEntries) error {
-		for _, entry := range entries {
-			entry := entry.(fs.Object)
-			objName := entry.Remote()
-			object := strings.TrimPrefix(objName, bucket)[1:]
-			var matchResult gofakes3.PrefixMatch
-			if prefix.Match(object, &matchResult) {
-				if matchResult.CommonPrefix {
-					response.AddPrefix(gofakes3.URLEncode(object))
-					continue
-				}
-				item := &gofakes3.Content{
-					Key:          gofakes3.URLEncode(object),
-					LastModified: gofakes3.NewContentTime(entry.ModTime(context.Background())),
-					ETag:         getFileHash(entry),
-					Size:         entry.Size(),
-					StorageClass: gofakes3.StorageStandard,
-				}
-				response.Add(item)
-			}
-		}
-		return nil
-	})
-	return nil
-}