swift: add --swift-no-large-objects to reduce HEAD requests

Supplying the flag --swift-no-large-objects is a promise to the swift
backend that there are no dynamic or static large objects stored.

Using that knowledge, rclone can speed up its operations by reducing
the number of HEAD requests.

See: https://forum.rclone.org/t/handling-or-not-of-large-objects-in-swift/33389/
See: https://forum.rclone.org/t/swift-sync-checksum-calls-head-on-every-object-so-is-very-slow/30322
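
As a usage sketch (the remote name myswift and the container paths are
hypothetical), the flag can be supplied on the command line, for example
for a swift to swift sync with checksums:

    rclone sync --checksum --swift-no-large-objects myswift:src myswift:dst
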
Nick Craig-Wood 2022-10-05 17:39:24 +01:00
parent 3f804224f4
commit 5ea9398b63
1 changed file with 37 additions and 1 deletion

@@ -63,6 +63,32 @@ Rclone will still chunk files bigger than chunk_size when doing normal
copy operations.`,
Default: false,
Advanced: true,
}, {
Name: "no_large_objects",
Help: strings.ReplaceAll(`Disable support for static and dynamic large objects

Swift cannot transparently store files bigger than 5 GiB. There are
two schemes for doing that, static or dynamic large objects, and the
API does not allow rclone to determine whether a file is a static or
dynamic large object without doing a HEAD on the object. Since these
need to be treated differently, this means rclone has to issue HEAD
requests for objects for example when reading checksums.

When |no_large_objects| is set, rclone will assume that there are no
static or dynamic large objects stored. This means it can stop doing
the extra HEAD calls which in turn increases performance greatly
especially when doing a swift to swift transfer with |--checksum| set.

Setting this option implies |no_chunk| and also that no files will be
uploaded in chunks, so files bigger than 5 GiB will just fail on
upload.

If you set this option and there *are* static or dynamic large objects,
then this will give incorrect hashes for them. Downloads will succeed,
but other operations such as Remove and Copy will fail.
`, "|", "`"),
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
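
For reference, a minimal sketch of the equivalent setting in rclone.conf
(the remote name is hypothetical, and the credentials a real remote needs
are omitted):

    [myswift]
    type = swift
    no_large_objects = true
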
@@ -222,6 +248,7 @@ type Options struct {
EndpointType string `config:"endpoint_type"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
NoChunk bool `config:"no_chunk"`
NoLargeObjects bool `config:"no_large_objects"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -1100,15 +1127,24 @@ func (o *Object) hasHeader(ctx context.Context, header string) (bool, error) {
// isDynamicLargeObject checks for X-Object-Manifest header
func (o *Object) isDynamicLargeObject(ctx context.Context) (bool, error) {
if o.fs.opt.NoLargeObjects {
return false, nil
}
return o.hasHeader(ctx, "X-Object-Manifest")
}
// isStaticLargeObject checks for the X-Static-Large-Object header
func (o *Object) isStaticLargeObject(ctx context.Context) (bool, error) {
if o.fs.opt.NoLargeObjects {
return false, nil
}
return o.hasHeader(ctx, "X-Static-Large-Object")
}
func (o *Object) isLargeObject(ctx context.Context) (result bool, err error) {
if o.fs.opt.NoLargeObjects {
return false, nil
}
result, err = o.hasHeader(ctx, "X-Static-Large-Object")
if result {
return
@@ -1464,7 +1500,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
headers := m.ObjectHeaders()
fs.OpenOptionAddHeaders(options, headers)
if size > int64(o.fs.opt.ChunkSize) || (size == -1 && !o.fs.opt.NoChunk) {
if (size > int64(o.fs.opt.ChunkSize) || (size == -1 && !o.fs.opt.NoChunk)) && !o.fs.opt.NoLargeObjects {
_, err = o.updateChunks(ctx, in, headers, size, contentType)
if err != nil {
return err
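
To illustrate the guard added above, here is a standalone sketch of the
resulting upload decision. The options struct and useChunkedUpload helper
are hypothetical stand-ins that mirror the fields in the diff; they are
not part of rclone:

    package main

    import "fmt"

    // options mirrors the option fields used in the guard above.
    type options struct {
        ChunkSize      int64
        NoChunk        bool
        NoLargeObjects bool
    }

    // useChunkedUpload reproduces the condition from Update: chunked upload
    // is only attempted when the size check passes and no_large_objects is
    // unset.
    func useChunkedUpload(size int64, opt options) bool {
        return (size > opt.ChunkSize || (size == -1 && !opt.NoChunk)) && !opt.NoLargeObjects
    }

    func main() {
        opt := options{ChunkSize: 5 << 30, NoLargeObjects: true}
        // A 6 GiB object would previously have been uploaded in chunks; with
        // no_large_objects set it is sent as a single object, which the
        // server will reject as over the 5 GiB limit.
        fmt.Println(useChunkedUpload(6<<30, opt)) // false
    }

This matches the help text above: with the option set, files bigger than
5 GiB will simply fail on upload.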