From c27977d4d5269ef0317edc13659718e09bcd7266 Mon Sep 17 00:00:00 2001
From: Nick Craig-Wood
Date: Fri, 24 Nov 2023 12:36:48 +0000
Subject: [PATCH] fstest: factor chunked copy tests from b2 and use them in s3 and oos

---
 backend/b2/b2.go                |  8 ++
 backend/b2/b2_internal_test.go  | 43 ----------
 backend/b2/b2_test.go           |  5 ++
 .../oracleobjectstorage.go      |  8 ++
 .../oracleobjectstorage_test.go | 10 ++-
 backend/s3/s3.go                |  8 ++
 backend/s3/s3_test.go           | 10 ++-
 fstest/fstests/fstests.go       | 86 +++++++++++++++++++
 8 files changed, 133 insertions(+), 45 deletions(-)

diff --git a/backend/b2/b2.go b/backend/b2/b2.go
index 6c83c5895..870a8178e 100644
--- a/backend/b2/b2.go
+++ b/backend/b2/b2.go
@@ -455,6 +455,14 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
 	return
 }
 
+func (f *Fs) setCopyCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
+	err = checkUploadChunkSize(cs)
+	if err == nil {
+		old, f.opt.CopyCutoff = f.opt.CopyCutoff, cs
+	}
+	return
+}
+
 // setRoot changes the root of the Fs
 func (f *Fs) setRoot(root string) {
 	f.root = parsePath(root)
diff --git a/backend/b2/b2_internal_test.go b/backend/b2/b2_internal_test.go
index f5f0f33ff..a2211fd8c 100644
--- a/backend/b2/b2_internal_test.go
+++ b/backend/b2/b2_internal_test.go
@@ -178,48 +178,6 @@ func TestParseTimeString(t *testing.T) {
 
 }
 
-// The integration tests do a reasonable job of testing the normal
-// copy but don't test the chunked copy.
-func (f *Fs) InternalTestChunkedCopy(t *testing.T) {
-	ctx := context.Background()
-
-	contents := random.String(8 * 1024 * 1024)
-	item := fstest.NewItem("chunked-copy", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
-	src := fstests.PutTestContents(ctx, t, f, &item, contents, true)
-	defer func() {
-		assert.NoError(t, src.Remove(ctx))
-	}()
-
-	var itemCopy = item
-	itemCopy.Path += ".copy"
-
-	// Set copy cutoff to mininum value so we make chunks
-	origCutoff := f.opt.CopyCutoff
-	f.opt.CopyCutoff = minChunkSize
-	defer func() {
-		f.opt.CopyCutoff = origCutoff
-	}()
-
-	// Do the copy
-	dst, err := f.Copy(ctx, src, itemCopy.Path)
-	require.NoError(t, err)
-	defer func() {
-		assert.NoError(t, dst.Remove(ctx))
-	}()
-
-	// Check size
-	assert.Equal(t, src.Size(), dst.Size())
-
-	// Check modtime
-	srcModTime := src.ModTime(ctx)
-	dstModTime := dst.ModTime(ctx)
-	assert.True(t, srcModTime.Equal(dstModTime))
-
-	// Make sure contents are correct
-	gotContents := fstests.ReadObject(ctx, t, dst, -1)
-	assert.Equal(t, contents, gotContents)
-}
-
 // The integration tests do a reasonable job of testing the normal
 // streaming upload but don't test the chunked streaming upload.
 func (f *Fs) InternalTestChunkedStreamingUpload(t *testing.T, size int) {
@@ -259,7 +217,6 @@ func (f *Fs) InternalTestChunkedStreamingUpload(t *testing.T, size int) {
 
 // -run TestIntegration/FsMkdir/FsPutFiles/Internal
 func (f *Fs) InternalTest(t *testing.T) {
-	t.Run("ChunkedCopy", f.InternalTestChunkedCopy)
 	for _, size := range []fs.SizeSuffix{
 		minChunkSize - 1,
 		minChunkSize,
diff --git a/backend/b2/b2_test.go b/backend/b2/b2_test.go
index 6437d2adc..43e28e5b1 100644
--- a/backend/b2/b2_test.go
+++ b/backend/b2/b2_test.go
@@ -28,7 +28,12 @@ func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
 	return f.setUploadCutoff(cs)
 }
 
+func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
+	return f.setCopyCutoff(cs)
+}
+
 var (
 	_ fstests.SetUploadChunkSizer = (*Fs)(nil)
 	_ fstests.SetUploadCutoffer   = (*Fs)(nil)
+	_ fstests.SetCopyCutoffer     = (*Fs)(nil)
 )
diff --git a/backend/oracleobjectstorage/oracleobjectstorage.go b/backend/oracleobjectstorage/oracleobjectstorage.go
index e1cc2980d..2133ebc09 100644
--- a/backend/oracleobjectstorage/oracleobjectstorage.go
+++ b/backend/oracleobjectstorage/oracleobjectstorage.go
@@ -138,6 +138,14 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
 	return
 }
 
+func (f *Fs) setCopyCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
+	err = checkUploadChunkSize(cs)
+	if err == nil {
+		old, f.opt.CopyCutoff = f.opt.CopyCutoff, cs
+	}
+	return
+}
+
 // ------------------------------------------------------------
 // Implement backed that represents a remote object storage server
 // Fs is the interface a cloud storage system must provide
diff --git a/backend/oracleobjectstorage/oracleobjectstorage_test.go b/backend/oracleobjectstorage/oracleobjectstorage_test.go
index 479da7b5b..daccfaeed 100644
--- a/backend/oracleobjectstorage/oracleobjectstorage_test.go
+++ b/backend/oracleobjectstorage/oracleobjectstorage_test.go
@@ -30,4 +30,12 @@ func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
 	return f.setUploadCutoff(cs)
 }
 
-var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
+func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
+	return f.setCopyCutoff(cs)
+}
+
+var (
+	_ fstests.SetUploadChunkSizer = (*Fs)(nil)
+	_ fstests.SetUploadCutoffer   = (*Fs)(nil)
+	_ fstests.SetCopyCutoffer     = (*Fs)(nil)
+)
diff --git a/backend/s3/s3.go b/backend/s3/s3.go
index bb4e97c61..42526100e 100644
--- a/backend/s3/s3.go
+++ b/backend/s3/s3.go
@@ -3011,6 +3011,14 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
 	return
 }
 
+func (f *Fs) setCopyCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
+	err = checkUploadChunkSize(cs)
+	if err == nil {
+		old, f.opt.CopyCutoff = f.opt.CopyCutoff, cs
+	}
+	return
+}
+
 // setEndpointValueForIDriveE2 gets user region endpoint against the Access Key details by calling the API
 func setEndpointValueForIDriveE2(m configmap.Mapper) (err error) {
 	value, ok := m.Get(fs.ConfigProvider)
diff --git a/backend/s3/s3_test.go b/backend/s3/s3_test.go
index 2cec88185..3415bebf2 100644
--- a/backend/s3/s3_test.go
+++ b/backend/s3/s3_test.go
@@ -47,4 +47,12 @@ func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
 	return f.setUploadCutoff(cs)
 }
 
-var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
+func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
+	return f.setCopyCutoff(cs)
+}
+
+var (
+	_ fstests.SetUploadChunkSizer = (*Fs)(nil)
+	_ fstests.SetUploadCutoffer   = (*Fs)(nil)
+	_ fstests.SetCopyCutoffer     = (*Fs)(nil)
+)
diff --git a/fstest/fstests/fstests.go b/fstest/fstests/fstests.go
index 9ffbca3c7..700aad41a 100644
--- a/fstest/fstests/fstests.go
+++ b/fstest/fstests/fstests.go
@@ -78,6 +78,13 @@ type SetUploadCutoffer interface {
 	SetUploadCutoff(fs.SizeSuffix) (fs.SizeSuffix, error)
 }
 
+// SetCopyCutoffer is a test only interface to change the copy cutoff size at runtime
+type SetCopyCutoffer interface {
+	// Change the configured CopyCutoff.
+	// Will only be called while no transfer is in progress.
+	SetCopyCutoff(fs.SizeSuffix) (fs.SizeSuffix, error)
+}
+
 // NextPowerOfTwo returns the current or next bigger power of two.
 // All values less or equal 0 will return 0
 func NextPowerOfTwo(i fs.SizeSuffix) fs.SizeSuffix {
@@ -2096,6 +2103,85 @@ func Run(t *testing.T, opt *Opt) {
 			}
 		})
 
+		// Copy files with chunked copy if available
+		t.Run("FsCopyChunked", func(t *testing.T) {
+			skipIfNotOk(t)
+			if testing.Short() {
+				t.Skip("not running with -short")
+			}
+
+			// Check have Copy
+			doCopy := f.Features().Copy
+			if doCopy == nil {
+				t.Skip("FS has no Copier interface")
+			}
+
+			if opt.ChunkedUpload.Skip {
+				t.Skip("skipping as ChunkedUpload.Skip is set")
+			}
+
+			do, _ := f.(SetCopyCutoffer)
+			if do == nil {
+				t.Skipf("%T does not implement SetCopyCutoff", f)
+			}
+
+			minChunkSize := opt.ChunkedUpload.MinChunkSize
+			if minChunkSize < 100 {
+				minChunkSize = 100
+			}
+			if opt.ChunkedUpload.CeilChunkSize != nil {
+				minChunkSize = opt.ChunkedUpload.CeilChunkSize(minChunkSize)
+			}
+
+			chunkSizes := fs.SizeSuffixList{
+				minChunkSize,
+				minChunkSize + 1,
+				2*minChunkSize - 1,
+				2 * minChunkSize,
+				2*minChunkSize + 1,
+			}
+			for _, chunkSize := range chunkSizes {
+				t.Run(fmt.Sprintf("%d", chunkSize), func(t *testing.T) {
+					contents := random.String(int(chunkSize))
+					item := fstest.NewItem("chunked-copy", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
+					src := PutTestContents(ctx, t, f, &item, contents, true)
+					defer func() {
+						assert.NoError(t, src.Remove(ctx))
+					}()
+
+					var itemCopy = item
+					itemCopy.Path += ".copy"
+
+					// Set copy cutoff to minimum value so we make chunks
+					origCutoff, err := do.SetCopyCutoff(minChunkSize)
+					require.NoError(t, err)
+					defer func() {
+						_, err = do.SetCopyCutoff(origCutoff)
+						require.NoError(t, err)
+					}()
+
+					// Do the copy
+					dst, err := doCopy(ctx, src, itemCopy.Path)
+					require.NoError(t, err)
+					defer func() {
+						assert.NoError(t, dst.Remove(ctx))
+					}()
+
+					// Check size
+					assert.Equal(t, src.Size(), dst.Size())
+
+					// Check modtime
+					srcModTime := src.ModTime(ctx)
+					dstModTime := dst.ModTime(ctx)
+					assert.True(t, srcModTime.Equal(dstModTime))
+
+					// Make sure contents are correct
+					gotContents := ReadObject(ctx, t, dst, -1)
+					assert.Equal(t, contents, gotContents)
+				})
+			}
+		})
+
 		// TestFsUploadUnknownSize ensures Fs.Put() and Object.Update() don't panic when
 		// src.Size() == -1
 		//
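
Note for backend authors (illustrative sketch, not part of the patch above): a backend opts in to the new FsCopyChunked test by exposing its copy cutoff through the fstests.SetCopyCutoffer interface, exactly as the b2, s3 and oos test files do above. For a hypothetical backend whose Fs already has a private setCopyCutoff helper like the ones added in this patch, the wiring in its *_test.go could look roughly like this:

// Sketch only: assumes an existing *Fs type and a setCopyCutoff helper
// equivalent to the ones added to b2, s3 and oos in this patch.
package mybackend

import (
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fstest/fstests"
)

// SetCopyCutoff lets the integration tests lower the copy cutoff at runtime
// so that server-side copies are forced down the chunked (multipart) path.
func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
	return f.setCopyCutoff(cs)
}

// Compile-time check that the test hook is wired up.
var _ fstests.SetCopyCutoffer = (*Fs)(nil)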