From f7e3115955eeec59d0dc618c6d912371153edb09 Mon Sep 17 00:00:00 2001 From: Nick Craig-Wood Date: Fri, 5 Mar 2021 11:04:57 +0000 Subject: [PATCH] s3: fix Wasabi HEAD requests returning stale data by using only 1 transport In this commit fc5b14b62046b240 s3: Added `--s3-disable-http2` to disable http/2 We created our own transport so we could disable http/2. However, the added function is called twice, meaning that we create two HTTP transports. This didn't happen with the original code because the default transport is cached by fshttp. Rclone normally does a PUT followed by a HEAD request to check that an upload has been successful. With the two transports, the PUT and the HEAD were being done on different HTTP transports. This means that it wasn't re-using the same HTTP connection, so the HEAD request showed the previous object value. This caused rclone to declare the upload was corrupted, delete the object and try again. This patch makes sure we only create one transport and use it for both PUT and HEAD requests, which fixes the problem with Wasabi. See: https://forum.rclone.org/t/each-time-rclone-is-run-1-3-fails-2-3-succeeds/22545 --- backend/s3/s3.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/backend/s3/s3.go b/backend/s3/s3.go index f468a5147..fc570413b 100644 --- a/backend/s3/s3.go +++ b/backend/s3/s3.go @@ -1462,7 +1462,7 @@ func getClient(ctx context.Context, opt *Options) *http.Client { } // s3Connection makes a connection to s3 -func s3Connection(ctx context.Context, opt *Options) (*s3.S3, *session.Session, error) { +func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S3, *session.Session, error) { // Make the auth v := credentials.Value{ AccessKeyID: opt.AccessKeyID, @@ -1540,7 +1540,7 @@ func s3Connection(ctx context.Context, opt *Options, *session.Session, error) { awsConfig := aws.NewConfig(). WithMaxRetries(0). // Rely on rclone's retry logic WithCredentials(cred). 
- WithHTTPClient(getClient(ctx, opt)). + WithHTTPClient(client). WithS3ForcePathStyle(opt.ForcePathStyle). WithS3UseAccelerate(opt.UseAccelerateEndpoint). WithS3UsEast1RegionalEndpoint(endpoints.RegionalS3UsEast1Endpoint) @@ -1644,7 +1644,8 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e md5sumBinary := md5.Sum([]byte(opt.SSECustomerKey)) opt.SSECustomerKeyMD5 = base64.StdEncoding.EncodeToString(md5sumBinary[:]) } - c, ses, err := s3Connection(ctx, opt) + srv := getClient(ctx, opt) + c, ses, err := s3Connection(ctx, opt, srv) if err != nil { return nil, err } @@ -1659,7 +1660,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e ses: ses, pacer: fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep))), cache: bucket.NewCache(), - srv: getClient(ctx, opt), + srv: srv, pool: pool.New( time.Duration(opt.MemoryPoolFlushTime), int(opt.ChunkSize), @@ -1773,7 +1774,7 @@ func (f *Fs) updateRegionForBucket(bucket string) error { // Make a new session with the new region oldRegion := f.opt.Region f.opt.Region = region - c, ses, err := s3Connection(f.ctx, &f.opt) + c, ses, err := s3Connection(f.ctx, &f.opt, f.srv) if err != nil { return errors.Wrap(err, "creating new session failed") }