From dc06973796392edfa613a053f899ef869134c388 Mon Sep 17 00:00:00 2001 From: Nick Craig-Wood Date: Sat, 14 Mar 2020 17:28:29 +0000 Subject: [PATCH] s3: use rclone's low level retries instead of AWS SDK to fix listing retries In 5470d34740d03e15 "backend/s3: use low-level-retries as the number of SDK retries" we switched over to using the AWS SDK low level retries instead of rclone's low level retry logic. This had the unfortunate effect that retrying listings to correct XML Syntax errors failed on non S3 backends such as CEPH. The AWS SDK was also retrying the XML Syntax error request which doesn't make sense. This change turns off the AWS SDK retries in favour of just using rclone's retry logic. --- backend/s3/s3.go | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/backend/s3/s3.go b/backend/s3/s3.go index e62d85ee7..2e1f1d574 100644 --- a/backend/s3/s3.go +++ b/backend/s3/s3.go @@ -1099,7 +1099,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) { opt.ForcePathStyle = false } awsConfig := aws.NewConfig(). - WithMaxRetries(fs.Config.LowLevelRetries). + WithMaxRetries(0). // Rely on rclone's retry logic WithCredentials(cred). WithHTTPClient(fshttp.NewClient(fs.Config)). WithS3ForcePathStyle(opt.ForcePathStyle). @@ -1206,17 +1206,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { return nil, err } - pc := fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))) - // Set pacer retries to 0 because we are relying on SDK retry mechanism. - // Setting it to 1 because in context of pacer it means 1 attempt. - pc.SetRetries(1) - f := &Fs{ name: name, opt: *opt, c: c, ses: ses, - pacer: pc, + pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))), cache: bucket.NewCache(), srv: fshttp.NewClient(fs.Config), pools: make(map[int64]*pool.Pool),