From 83f61a9cfb8d914495b3aca51c2e91f92e1ac5a0 Mon Sep 17 00:00:00 2001
From: Nick Craig-Wood
Date: Sat, 3 Feb 2024 11:14:23 +0000
Subject: [PATCH] s3: GCS provider: fix server side copy of files bigger than 5G

GCS gives NotImplemented errors for multi-part server side copies. The
threshold for these is currently set just below 5G so any files bigger
than 5G that rclone attempts to server side copy will fail.

This patch works around the problem by adding a quirk for GCS raising
--s3-copy-cutoff to the maximum. This means that rclone will never use
multi-part copies for files in GCS. This includes files bigger than
5GB which (according to AWS documentation) must be copied with
multi-part copy. However this seems to work with GCS.

See: https://forum.rclone.org/t/chunker-uploads-to-gcs-s3-fail-if-the-chunk-size-is-greater-than-the-max-part-size/44349/
See: https://issuetracker.google.com/issues/323465186
---
 backend/s3/s3.go | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/backend/s3/s3.go b/backend/s3/s3.go
index 473f74ece..9183670c4 100644
--- a/backend/s3/s3.go
+++ b/backend/s3/s3.go
@@ -3222,6 +3222,10 @@ func setQuirks(opt *Options) {
 		// https://github.com/rclone/rclone/issues/6670
 		useAcceptEncodingGzip = false
 		useAlreadyExists = true // returns BucketNameUnavailable instead of BucketAlreadyExists but good enough!
+		// GCS S3 doesn't support multi-part server side copy:
+		// See: https://issuetracker.google.com/issues/323465186
+		// So make cutoff very large which it does seem to support
+		opt.CopyCutoff = math.MaxInt64
 	default:
 		fs.Logf("s3", "s3 provider %q not known - please set correctly", opt.Provider)
 		fallthrough
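
For reference, a minimal standalone Go sketch (not rclone's actual copy code; the
function name `chooseCopyMethod` and the exact comparison are illustrative
assumptions) of how a copy-cutoff value steers the choice between a single
CopyObject call and a multi-part copy, and why raising the cutoff to
math.MaxInt64 means the multi-part path is never taken for GCS:

```go
package main

import (
	"fmt"
	"math"
)

// chooseCopyMethod is a hypothetical helper mirroring the decision that
// --s3-copy-cutoff controls: objects larger than the cutoff would use the
// multi-part copy path, everything else a single CopyObject call.
func chooseCopyMethod(size, copyCutoff int64) string {
	if size > copyCutoff {
		return "multi-part server side copy"
	}
	return "single CopyObject server side copy"
}

func main() {
	const tenGiB = 10 * 1024 * 1024 * 1024

	// With a cutoff just below 5G, a 10 GiB object would take the
	// multi-part path, which GCS's S3 endpoint rejects with NotImplemented.
	fmt.Println(chooseCopyMethod(tenGiB, 5*1024*1024*1024-1))

	// With the GCS quirk (cutoff = math.MaxInt64) the multi-part path is
	// never chosen, so the copy goes through a single CopyObject call.
	fmt.Println(chooseCopyMethod(tenGiB, math.MaxInt64))
}
```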