diff --git a/backend/s3/s3.go b/backend/s3/s3.go
index c76c21f89..fe53d1479 100644
--- a/backend/s3/s3.go
+++ b/backend/s3/s3.go
@@ -850,7 +850,6 @@ In Ceph, this can be increased with the "rgw list buckets max chunk" option.
 			Default:  memoryPoolFlushTime,
 			Advanced: true,
 			Help: `How often internal memory buffer pools will be flushed.
-
 Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
 This option controls how often unused buffers will be removed from the pool.`,
 		}, {
@@ -866,7 +865,6 @@ This option controls how often unused buffers will be removed from the pool.`,
 const (
 	metaMtime      = "Mtime"                  // the meta key to store mtime in - eg X-Amz-Meta-Mtime
 	metaMD5Hash    = "Md5chksum"              // the meta key to store md5hash in
-	maxRetries     = 10                       // number of retries to make of operations
 	maxSizeForCopy = 5 * 1024 * 1024 * 1024   // The maximum size of object we can COPY
 	maxUploadParts = 10000                    // maximum allowed number of parts in a multi-part upload
 	minChunkSize   = fs.SizeSuffix(1024 * 1024 * 5)
@@ -1095,7 +1093,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
 		opt.ForcePathStyle = false
 	}
 	awsConfig := aws.NewConfig().
-		WithMaxRetries(maxRetries).
+		WithMaxRetries(fs.Config.LowLevelRetries).
 		WithCredentials(cred).
 		WithHTTPClient(fshttp.NewClient(fs.Config)).
 		WithS3ForcePathStyle(opt.ForcePathStyle).
@@ -1202,12 +1200,17 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		return nil, err
 	}
 
+	pc := fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep)))
+	// Set pacer retries to 1 because we are relying on the SDK retry mechanism;
+	// in the context of the pacer a value of 1 means a single attempt (no retries).
+	pc.SetRetries(1)
+
 	f := &Fs{
 		name:  name,
 		opt:   *opt,
 		c:     c,
 		ses:   ses,
-		pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
+		pacer: pc,
 		cache: bucket.NewCache(),
 		srv:   fshttp.NewClient(fs.Config),
 		pools: make(map[int64]*pool.Pool),
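For readers unfamiliar with the two retry layers involved, here is a minimal, self-contained sketch of the arrangement this change moves to: the SDK client owns the low level retries (driven by a `lowLevelRetries` value standing in for `fs.Config.LowLevelRetries`), while the outer pacer is pinned to a single attempt so the two layers do not multiply the number of tries. The `pacer` and `sdkClient` types below are illustrative stand-ins, not rclone's `fs.Pacer` or the AWS SDK client.

```go
// Illustrative sketch only - not rclone's pacer or the AWS SDK.
package main

import (
	"errors"
	"fmt"
	"time"
)

// pacer is a minimal stand-in for rclone's fs.Pacer: it retries a
// function up to `retries` attempts, sleeping between attempts.
type pacer struct {
	minSleep time.Duration
	retries  int
}

// call invokes fn up to p.retries times and returns the last error.
func (p *pacer) call(fn func() error) error {
	var err error
	for attempt := 0; attempt < p.retries; attempt++ {
		if attempt > 0 {
			time.Sleep(p.minSleep)
		}
		if err = fn(); err == nil {
			return nil
		}
	}
	return err
}

// sdkClient is a stand-in for the AWS SDK client: it retries transient
// failures internally, up to maxRetries extra attempts per call.
type sdkClient struct {
	maxRetries int
	failures   int // number of transient failures to simulate
}

func (c *sdkClient) listObjects() error {
	for try := 0; try <= c.maxRetries; try++ {
		if c.failures > 0 {
			c.failures--
			continue // simulated transient error, retried inside the SDK
		}
		return nil
	}
	return errors.New("listObjects: ran out of SDK retries")
}

func main() {
	lowLevelRetries := 10 // would come from fs.Config.LowLevelRetries

	client := &sdkClient{maxRetries: lowLevelRetries, failures: 3}

	// One attempt at the pacer level: retrying is the SDK's job now.
	p := &pacer{minSleep: 10 * time.Millisecond, retries: 1}

	err := p.call(client.listObjects)
	fmt.Println("result:", err)
}
```

Before this change the SDK client retried up to the hardcoded maxRetries of 10 and the pacer could then retry the whole operation again on top of that, multiplying the attempts; delegating retries to the SDK and limiting the pacer to a single attempt keeps the effective retry count tied to `--low-level-retries`.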