diff --git a/vfs/dir_handle_test.go b/vfs/dir_handle_test.go index 74fc483b4..46126b5a2 100644 --- a/vfs/dir_handle_test.go +++ b/vfs/dir_handle_test.go @@ -12,9 +12,8 @@ import ( ) func TestDirHandleMethods(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() - _, dir, _ := dirCreate(t, r) + _, _, dir, _, cleanup := dirCreate(t) + defer cleanup() h, err := dir.Open(os.O_RDONLY) require.NoError(t, err) @@ -40,9 +39,8 @@ func TestDirHandleMethods(t *testing.T) { } func TestDirHandleReaddir(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() - vfs := New(r.Fremote, nil) + r, vfs, cleanup := newTestVFS(t) + defer cleanup() file1 := r.WriteObject(context.Background(), "dir/file1", "file1 contents", t1) file2 := r.WriteObject(context.Background(), "dir/file2", "file2- contents", t2) @@ -96,9 +94,8 @@ func TestDirHandleReaddir(t *testing.T) { } func TestDirHandleReaddirnames(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() - _, dir, _ := dirCreate(t, r) + _, _, dir, _, cleanup := dirCreate(t) + defer cleanup() fh, err := dir.Open(os.O_RDONLY) require.NoError(t, err) diff --git a/vfs/dir_test.go b/vfs/dir_test.go index 81b6bb3d3..d7bba96de 100644 --- a/vfs/dir_test.go +++ b/vfs/dir_test.go @@ -14,8 +14,8 @@ import ( "github.com/stretchr/testify/require" ) -func dirCreate(t *testing.T, r *fstest.Run) (*VFS, *Dir, fstest.Item) { - vfs := New(r.Fremote, nil) +func dirCreate(t *testing.T) (r *fstest.Run, vfs *VFS, dir *Dir, item fstest.Item, cleanup func()) { + r, vfs, cleanup = newTestVFS(t) file1 := r.WriteObject(context.Background(), "dir/file1", "file1 contents", t1) fstest.CheckItems(t, r.Fremote, file1) @@ -24,13 +24,12 @@ func dirCreate(t *testing.T, r *fstest.Run) (*VFS, *Dir, fstest.Item) { require.NoError(t, err) require.True(t, node.IsDir()) - return vfs, node.(*Dir), file1 + return r, vfs, node.(*Dir), file1, cleanup } func TestDirMethods(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() - vfs, dir, _ := dirCreate(t, r) + _, vfs, dir, _, cleanup := dirCreate(t) + defer cleanup() // String assert.Equal(t, "dir/", dir.String()) @@ -81,9 +80,8 @@ func TestDirMethods(t *testing.T) { } func TestDirForgetAll(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() - vfs, dir, file1 := dirCreate(t, r) + _, vfs, dir, file1, cleanup := dirCreate(t) + defer cleanup() // Make sure / and dir are in cache _, err := vfs.Stat(file1.Path) @@ -110,9 +108,8 @@ func TestDirForgetAll(t *testing.T) { } func TestDirForgetPath(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() - vfs, dir, file1 := dirCreate(t, r) + _, vfs, dir, file1, cleanup := dirCreate(t) + defer cleanup() // Make sure / and dir are in cache _, err := vfs.Stat(file1.Path) @@ -143,9 +140,8 @@ func TestDirForgetPath(t *testing.T) { } func TestDirWalk(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() - vfs, _, file1 := dirCreate(t, r) + r, vfs, _, file1, cleanup := dirCreate(t) + defer cleanup() file2 := r.WriteObject(context.Background(), "fil/a/b/c", "super long file", t1) fstest.CheckItems(t, r.Fremote, file1, file2) @@ -213,9 +209,8 @@ func TestDirWalk(t *testing.T) { } func TestDirSetModTime(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() - vfs, dir, _ := dirCreate(t, r) + _, vfs, dir, _, cleanup := dirCreate(t) + defer cleanup() err := dir.SetModTime(t1) require.NoError(t, err) @@ -231,9 +226,8 @@ func TestDirSetModTime(t *testing.T) { } func TestDirStat(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() - _, dir, _ := dirCreate(t, r) + _, _, dir, _, 
cleanup := dirCreate(t) + defer cleanup() node, err := dir.Stat("file1") require.NoError(t, err) @@ -258,9 +252,8 @@ func checkListing(t *testing.T, dir *Dir, want []string) { } func TestDirReadDirAll(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() - vfs := New(r.Fremote, nil) + r, vfs, cleanup := newTestVFS(t) + defer cleanup() file1 := r.WriteObject(context.Background(), "dir/file1", "file1 contents", t1) file2 := r.WriteObject(context.Background(), "dir/file2", "file2- contents", t2) @@ -287,9 +280,8 @@ func TestDirReadDirAll(t *testing.T) { } func TestDirOpen(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() - _, dir, _ := dirCreate(t, r) + _, _, dir, _, cleanup := dirCreate(t) + defer cleanup() fd, err := dir.Open(os.O_RDONLY) require.NoError(t, err) @@ -302,9 +294,8 @@ func TestDirOpen(t *testing.T) { } func TestDirCreate(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() - vfs, dir, _ := dirCreate(t, r) + _, vfs, dir, _, cleanup := dirCreate(t) + defer cleanup() file, err := dir.Create("potato", os.O_WRONLY|os.O_CREATE) require.NoError(t, err) @@ -336,9 +327,8 @@ func TestDirCreate(t *testing.T) { } func TestDirMkdir(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() - vfs, dir, file1 := dirCreate(t, r) + r, vfs, dir, file1, cleanup := dirCreate(t) + defer cleanup() _, err := dir.Mkdir("file1") assert.Error(t, err) @@ -359,9 +349,8 @@ func TestDirMkdir(t *testing.T) { } func TestDirMkdirSub(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() - vfs, dir, file1 := dirCreate(t, r) + r, vfs, dir, file1, cleanup := dirCreate(t) + defer cleanup() _, err := dir.Mkdir("file1") assert.Error(t, err) @@ -386,9 +375,8 @@ func TestDirMkdirSub(t *testing.T) { } func TestDirRemove(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() - vfs, dir, _ := dirCreate(t, r) + r, vfs, dir, _, cleanup := dirCreate(t) + defer cleanup() // check directory is there node, err := vfs.Stat("dir") @@ -427,9 +415,8 @@ func TestDirRemove(t *testing.T) { } func TestDirRemoveAll(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() - vfs, dir, _ := dirCreate(t, r) + r, vfs, dir, _, cleanup := dirCreate(t) + defer cleanup() // Remove the directory and contents err := dir.RemoveAll() @@ -450,9 +437,8 @@ func TestDirRemoveAll(t *testing.T) { } func TestDirRemoveName(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() - vfs, dir, _ := dirCreate(t, r) + r, vfs, dir, _, cleanup := dirCreate(t) + defer cleanup() err := dir.RemoveName("file1") require.NoError(t, err) @@ -471,15 +457,14 @@ func TestDirRemoveName(t *testing.T) { } func TestDirRename(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() + r, vfs, dir, file1, cleanup := dirCreate(t) + defer cleanup() features := r.Fremote.Features() if features.DirMove == nil && features.Move == nil && features.Copy == nil { - return // skip as can't rename directories + t.Skip("can't rename directories") } - vfs, dir, file1 := dirCreate(t, r) file3 := r.WriteObject(context.Background(), "dir/file3", "file3 contents!", t1) fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{file1, file3}, []string{"dir"}, r.Fremote.Precision()) diff --git a/vfs/file.go b/vfs/file.go index 299def1e7..03d03a761 100644 --- a/vfs/file.go +++ b/vfs/file.go @@ -325,6 +325,18 @@ func (f *File) Size() int64 { f.mu.RLock() defer f.mu.RUnlock() + // Read the size from a dirty item if it exists + if f.d.vfs.Opt.CacheMode >= vfscommon.CacheModeMinimal { + if item := f.d.vfs.cache.DirtyItem(f._path()); item != nil { + 
size, err := item.GetSize() + if err != nil { + fs.Errorf(f._path(), "Size: Item GetSize failed: %v", err) + } else { + return size + } + } + } + // if o is nil it isn't valid yet or there are writers, so return the size so far if f._writingInProgress() { return atomic.LoadInt64(&f.size) diff --git a/vfs/file_test.go b/vfs/file_test.go index 4de96273e..61138a3a0 100644 --- a/vfs/file_test.go +++ b/vfs/file_test.go @@ -16,10 +16,11 @@ import ( "github.com/stretchr/testify/require" ) -func fileCreate(t *testing.T, r *fstest.Run, mode vfscommon.CacheMode) (*VFS, *File, fstest.Item) { +func fileCreate(t *testing.T, mode vfscommon.CacheMode) (r *fstest.Run, vfs *VFS, fh *File, item fstest.Item, cleanup func()) { opt := vfscommon.DefaultOpt opt.CacheMode = mode - vfs := New(r.Fremote, &opt) + opt.WriteBack = writeBackDelay + r, vfs, cleanup = newTestVFSOpt(t, &opt) file1 := r.WriteObject(context.Background(), "dir/file1", "file1 contents", t1) fstest.CheckItems(t, r.Fremote, file1) @@ -28,13 +29,12 @@ func fileCreate(t *testing.T, r *fstest.Run, mode vfscommon.CacheMode) (*VFS, *F require.NoError(t, err) require.True(t, node.Mode().IsRegular()) - return vfs, node.(*File), file1 + return r, vfs, node.(*File), file1, cleanup } func TestFileMethods(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() - vfs, file, _ := fileCreate(t, r, vfscommon.CacheModeOff) + r, vfs, file, _, cleanup := fileCreate(t, vfscommon.CacheModeOff) + defer cleanup() // String assert.Equal(t, "dir/file1", file.String()) @@ -88,12 +88,11 @@ func TestFileMethods(t *testing.T) { } func TestFileSetModTime(t *testing.T) { - r := fstest.NewRun(t) + r, vfs, file, file1, cleanup := fileCreate(t, vfscommon.CacheModeOff) + defer cleanup() if !canSetModTime(t, r) { - return + t.Skip("can't set mod time") } - defer r.Finalise() - vfs, file, file1 := fileCreate(t, r, vfscommon.CacheModeOff) err := file.SetModTime(t2) require.NoError(t, err) @@ -118,9 +117,8 @@ func fileCheckContents(t *testing.T, file *File) { } func TestFileOpenRead(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() - _, file, _ := fileCreate(t, r, vfscommon.CacheModeOff) + _, _, file, _, cleanup := fileCreate(t, vfscommon.CacheModeOff) + defer cleanup() fileCheckContents(t, file) } @@ -146,6 +144,7 @@ func TestFileOpenReadUnknownSize(t *testing.T) { // create a VFS from that mockfs vfs := New(f, nil) + defer cleanupVFS(t, vfs) // find the file node, err := vfs.Stat(remote) @@ -171,9 +170,8 @@ func TestFileOpenReadUnknownSize(t *testing.T) { } func TestFileOpenWrite(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() - vfs, file, _ := fileCreate(t, r, vfscommon.CacheModeOff) + _, vfs, file, _, cleanup := fileCreate(t, vfscommon.CacheModeOff) + defer cleanup() fd, err := file.openWrite(os.O_WRONLY | os.O_TRUNC) require.NoError(t, err) @@ -192,9 +190,8 @@ func TestFileOpenWrite(t *testing.T) { } func TestFileRemove(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() - vfs, file, _ := fileCreate(t, r, vfscommon.CacheModeOff) + r, vfs, file, _, cleanup := fileCreate(t, vfscommon.CacheModeOff) + defer cleanup() err := file.Remove() require.NoError(t, err) @@ -207,9 +204,8 @@ func TestFileRemove(t *testing.T) { } func TestFileRemoveAll(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() - vfs, file, _ := fileCreate(t, r, vfscommon.CacheModeOff) + r, vfs, file, _, cleanup := fileCreate(t, vfscommon.CacheModeOff) + defer cleanup() err := file.RemoveAll() require.NoError(t, err) @@ -222,9 +218,8 @@ func TestFileRemoveAll(t 
*testing.T) { } func TestFileOpen(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() - _, file, _ := fileCreate(t, r, vfscommon.CacheModeOff) + _, _, file, _, cleanup := fileCreate(t, vfscommon.CacheModeOff) + defer cleanup() fd, err := file.Open(os.O_RDONLY) require.NoError(t, err) @@ -242,15 +237,15 @@ func TestFileOpen(t *testing.T) { assert.NoError(t, err) _, ok = fd.(*WriteFileHandle) assert.True(t, ok) + require.NoError(t, fd.Close()) _, err = file.Open(3) assert.Equal(t, EPERM, err) } func testFileRename(t *testing.T, mode vfscommon.CacheMode) { - r := fstest.NewRun(t) - defer r.Finalise() - vfs, file, item := fileCreate(t, r, mode) + r, vfs, file, item, cleanup := fileCreate(t, mode) + defer cleanup() if !operations.CanServerSideMove(r.Fremote) { t.Skip("skip as can't rename files") @@ -326,6 +321,7 @@ func testFileRename(t *testing.T, mode vfscommon.CacheMode) { // Check file has now been renamed on the remote item.Path = "newLeaf" + vfs.WaitForWriters(waitForWritersDelay) fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{newItem}, nil, fs.ModTimeNotSupported) } diff --git a/vfs/read_test.go b/vfs/read_test.go index 3af74a243..23dd5564a 100644 --- a/vfs/read_test.go +++ b/vfs/read_test.go @@ -12,8 +12,8 @@ import ( ) // Open a file for write -func readHandleCreate(t *testing.T, r *fstest.Run) (*VFS, *ReadFileHandle) { - vfs := New(r.Fremote, nil) +func readHandleCreate(t *testing.T) (r *fstest.Run, vfs *VFS, fh *ReadFileHandle, cleanup func()) { + r, vfs, cleanup = newTestVFS(t) file1 := r.WriteObject(context.Background(), "dir/file1", "0123456789abcdef", t1) fstest.CheckItems(t, r.Fremote, file1) @@ -23,7 +23,7 @@ func readHandleCreate(t *testing.T, r *fstest.Run) (*VFS, *ReadFileHandle) { fh, ok := h.(*ReadFileHandle) require.True(t, ok) - return vfs, fh + return r, vfs, fh, cleanup } // read data from the string @@ -37,9 +37,8 @@ func readString(t *testing.T, fh *ReadFileHandle, n int) string { } func TestReadFileHandleMethods(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() - _, fh := readHandleCreate(t, r) + _, _, fh, cleanup := readHandleCreate(t) + defer cleanup() // String assert.Equal(t, "dir/file1 (r)", fh.String()) @@ -81,9 +80,8 @@ func TestReadFileHandleMethods(t *testing.T) { } func TestReadFileHandleSeek(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() - _, fh := readHandleCreate(t, r) + _, _, fh, cleanup := readHandleCreate(t) + defer cleanup() assert.Equal(t, "0", readString(t, fh, 1)) @@ -125,9 +123,8 @@ func TestReadFileHandleSeek(t *testing.T) { } func TestReadFileHandleReadAt(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() - _, fh := readHandleCreate(t, r) + _, _, fh, cleanup := readHandleCreate(t) + defer cleanup() // read from start buf := make([]byte, 1) @@ -182,9 +179,8 @@ func TestReadFileHandleReadAt(t *testing.T) { } func TestReadFileHandleFlush(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() - _, fh := readHandleCreate(t, r) + _, _, fh, cleanup := readHandleCreate(t) + defer cleanup() // Check Flush does nothing if read not called err := fh.Flush() @@ -212,9 +208,8 @@ func TestReadFileHandleFlush(t *testing.T) { } func TestReadFileHandleRelease(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() - _, fh := readHandleCreate(t, r) + _, _, fh, cleanup := readHandleCreate(t) + defer cleanup() // Check Release does nothing if file not read from err := fh.Release() diff --git a/vfs/read_write.go b/vfs/read_write.go index 19a18f445..74262f4ae 100644 --- a/vfs/read_write.go +++ 
b/vfs/read_write.go @@ -36,7 +36,7 @@ func newRWFileHandle(d *Dir, f *File, flags int) (fh *RWFileHandle, err error) { // get an item to represent this from the cache item := d.vfs.cache.Item(f.Path()) - exists := f.exists() // || item.Exists() + exists := f.exists() || item.Exists() // if O_CREATE and O_EXCL are set and if path already exists, then return EEXIST if flags&(os.O_CREATE|os.O_EXCL) == os.O_CREATE|os.O_EXCL && exists { @@ -51,7 +51,7 @@ func newRWFileHandle(d *Dir, f *File, flags int) (fh *RWFileHandle, err error) { } // truncate immediately if O_TRUNC is set or O_CREATE is set and file doesn't exist - if !fh.readOnly() && (fh.flags&os.O_TRUNC != 0 || (fh.flags&os.O_CREATE != 0 && !(exists || item.Exists()))) { + if !fh.readOnly() && (fh.flags&os.O_TRUNC != 0 || (fh.flags&os.O_CREATE != 0 && !exists)) { err = fh.Truncate(0) if err != nil { return nil, errors.Wrap(err, "cache open with O_TRUNC: failed to truncate") diff --git a/vfs/read_write_test.go b/vfs/read_write_test.go index b05dbe248..3bf713289 100644 --- a/vfs/read_write_test.go +++ b/vfs/read_write_test.go @@ -29,17 +29,12 @@ var ( _ Handle = (*RWFileHandle)(nil) ) -func cleanup(t *testing.T, r *fstest.Run, vfs *VFS) { - assert.NoError(t, vfs.CleanUp()) - vfs.Shutdown() - r.Finalise() -} - // Create a file and open it with the flags passed in -func rwHandleCreateFlags(t *testing.T, r *fstest.Run, create bool, filename string, flags int) (*VFS, *RWFileHandle) { +func rwHandleCreateFlags(t *testing.T, create bool, filename string, flags int) (r *fstest.Run, vfs *VFS, fh *RWFileHandle, cleanup func()) { opt := vfscommon.DefaultOpt opt.CacheMode = vfscommon.CacheModeFull - vfs := New(r.Fremote, &opt) + opt.WriteBack = writeBackDelay + r, vfs, cleanup = newTestVFSOpt(t, &opt) if create { file1 := r.WriteObject(context.Background(), filename, "0123456789abcdef", t1) @@ -51,17 +46,17 @@ func rwHandleCreateFlags(t *testing.T, r *fstest.Run, create bool, filename stri fh, ok := h.(*RWFileHandle) require.True(t, ok) - return vfs, fh + return r, vfs, fh, cleanup } // Open a file for read -func rwHandleCreateReadOnly(t *testing.T, r *fstest.Run) (*VFS, *RWFileHandle) { - return rwHandleCreateFlags(t, r, true, "dir/file1", os.O_RDONLY) +func rwHandleCreateReadOnly(t *testing.T) (r *fstest.Run, vfs *VFS, fh *RWFileHandle, cleanup func()) { + return rwHandleCreateFlags(t, true, "dir/file1", os.O_RDONLY) } // Open a file for write -func rwHandleCreateWriteOnly(t *testing.T, r *fstest.Run) (*VFS, *RWFileHandle) { - return rwHandleCreateFlags(t, r, false, "file1", os.O_WRONLY|os.O_CREATE) +func rwHandleCreateWriteOnly(t *testing.T) (r *fstest.Run, vfs *VFS, fh *RWFileHandle, cleanup func()) { + return rwHandleCreateFlags(t, false, "file1", os.O_WRONLY|os.O_CREATE) } // read data from the string @@ -75,9 +70,8 @@ func rwReadString(t *testing.T, fh *RWFileHandle, n int) string { } func TestRWFileHandleMethodsRead(t *testing.T) { - r := fstest.NewRun(t) - vfs, fh := rwHandleCreateReadOnly(t, r) - defer cleanup(t, r, vfs) + _, _, fh, cleanup := rwHandleCreateReadOnly(t) + defer cleanup() // String assert.Equal(t, "dir/file1 (rw)", fh.String()) @@ -123,9 +117,8 @@ func TestRWFileHandleMethodsRead(t *testing.T) { } func TestRWFileHandleSeek(t *testing.T) { - r := fstest.NewRun(t) - vfs, fh := rwHandleCreateReadOnly(t, r) - defer cleanup(t, r, vfs) + _, _, fh, cleanup := rwHandleCreateReadOnly(t) + defer cleanup() assert.Equal(t, fh.opened, false) @@ -174,9 +167,8 @@ func TestRWFileHandleSeek(t *testing.T) { } func TestRWFileHandleReadAt(t 
*testing.T) { - r := fstest.NewRun(t) - vfs, fh := rwHandleCreateReadOnly(t, r) - defer cleanup(t, r, vfs) + _, _, fh, cleanup := rwHandleCreateReadOnly(t) + defer cleanup() // read from start buf := make([]byte, 1) @@ -225,9 +217,8 @@ func TestRWFileHandleReadAt(t *testing.T) { } func TestRWFileHandleFlushRead(t *testing.T) { - r := fstest.NewRun(t) - vfs, fh := rwHandleCreateReadOnly(t, r) - defer cleanup(t, r, vfs) + _, _, fh, cleanup := rwHandleCreateReadOnly(t) + defer cleanup() // Check Flush does nothing if read not called err := fh.Flush() @@ -255,9 +246,8 @@ func TestRWFileHandleFlushRead(t *testing.T) { } func TestRWFileHandleReleaseRead(t *testing.T) { - r := fstest.NewRun(t) - vfs, fh := rwHandleCreateReadOnly(t, r) - defer cleanup(t, r, vfs) + _, _, fh, cleanup := rwHandleCreateReadOnly(t) + defer cleanup() // Read data buf := make([]byte, 256) @@ -279,9 +269,8 @@ func TestRWFileHandleReleaseRead(t *testing.T) { /// ------------------------------------------------------------ func TestRWFileHandleMethodsWrite(t *testing.T) { - r := fstest.NewRun(t) - vfs, fh := rwHandleCreateWriteOnly(t, r) - defer cleanup(t, r, vfs) + r, vfs, fh, cleanup := rwHandleCreateWriteOnly(t) + defer cleanup() // String assert.Equal(t, "file1 (rw)", fh.String()) @@ -351,13 +340,13 @@ func TestRWFileHandleMethodsWrite(t *testing.T) { // check the underlying r.Fremote but not the modtime file1 := fstest.NewItem("file1", "hello world", t1) + vfs.WaitForWriters(waitForWritersDelay) fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{file1}, []string{}, fs.ModTimeNotSupported) } func TestRWFileHandleWriteAt(t *testing.T) { - r := fstest.NewRun(t) - vfs, fh := rwHandleCreateWriteOnly(t, r) - defer cleanup(t, r, vfs) + r, vfs, fh, cleanup := rwHandleCreateWriteOnly(t) + defer cleanup() offset := func() int64 { n, err := fh.Seek(0, io.SeekCurrent) @@ -399,13 +388,13 @@ func TestRWFileHandleWriteAt(t *testing.T) { // check the underlying r.Fremote but not the modtime file1 := fstest.NewItem("file1", "hello world", t1) + vfs.WaitForWriters(waitForWritersDelay) fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{file1}, []string{}, fs.ModTimeNotSupported) } func TestRWFileHandleWriteNoWrite(t *testing.T) { - r := fstest.NewRun(t) - vfs, fh := rwHandleCreateWriteOnly(t, r) - defer cleanup(t, r, vfs) + r, vfs, fh, cleanup := rwHandleCreateWriteOnly(t) + defer cleanup() // Close the file without writing to it err := fh.Close() @@ -433,13 +422,13 @@ func TestRWFileHandleWriteNoWrite(t *testing.T) { // check the underlying r.Fremote but not the modtime file1 := fstest.NewItem("file1", "", t1) file2 := fstest.NewItem("file2", "", t1) + vfs.WaitForWriters(waitForWritersDelay) fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{file1, file2}, []string{}, fs.ModTimeNotSupported) } func TestRWFileHandleFlushWrite(t *testing.T) { - r := fstest.NewRun(t) - vfs, fh := rwHandleCreateWriteOnly(t, r) - defer cleanup(t, r, vfs) + _, _, fh, cleanup := rwHandleCreateWriteOnly(t) + defer cleanup() // Check that the file has been create and is open assert.True(t, fh.opened) @@ -467,9 +456,8 @@ func TestRWFileHandleFlushWrite(t *testing.T) { } func TestRWFileHandleReleaseWrite(t *testing.T) { - r := fstest.NewRun(t) - vfs, fh := rwHandleCreateWriteOnly(t, r) - defer cleanup(t, r, vfs) + _, _, fh, cleanup := rwHandleCreateWriteOnly(t) + defer cleanup() // Write some data n, err := fh.Write([]byte("hello")) @@ -498,9 +486,8 @@ func assertSize(t *testing.T, vfs *VFS, fh *RWFileHandle, filepath string, size } 
func TestRWFileHandleSizeTruncateExisting(t *testing.T) { - r := fstest.NewRun(t) - vfs, fh := rwHandleCreateFlags(t, r, true, "dir/file1", os.O_WRONLY|os.O_TRUNC) - defer cleanup(t, r, vfs) + _, vfs, fh, cleanup := rwHandleCreateFlags(t, true, "dir/file1", os.O_WRONLY|os.O_TRUNC) + defer cleanup() // check initial size after opening assertSize(t, vfs, fh, "dir/file1", 0) @@ -521,9 +508,8 @@ func TestRWFileHandleSizeTruncateExisting(t *testing.T) { } func TestRWFileHandleSizeCreateExisting(t *testing.T) { - r := fstest.NewRun(t) - vfs, fh := rwHandleCreateFlags(t, r, true, "dir/file1", os.O_WRONLY|os.O_CREATE) - defer cleanup(t, r, vfs) + _, vfs, fh, cleanup := rwHandleCreateFlags(t, true, "dir/file1", os.O_WRONLY|os.O_CREATE) + defer cleanup() // check initial size after opening assertSize(t, vfs, fh, "dir/file1", 16) @@ -552,9 +538,8 @@ func TestRWFileHandleSizeCreateExisting(t *testing.T) { } func TestRWFileHandleSizeCreateNew(t *testing.T) { - r := fstest.NewRun(t) - vfs, fh := rwHandleCreateFlags(t, r, false, "file1", os.O_WRONLY|os.O_CREATE) - defer cleanup(t, r, vfs) + _, vfs, fh, cleanup := rwHandleCreateFlags(t, false, "file1", os.O_WRONLY|os.O_CREATE) + defer cleanup() // check initial size after opening assertSize(t, vfs, fh, "file1", 0) @@ -653,11 +638,11 @@ func testRWFileHandleOpenTest(t *testing.T, vfs *VFS, test *openTest) { } func TestRWFileHandleOpenTests(t *testing.T) { - r := fstest.NewRun(t) opt := vfscommon.DefaultOpt opt.CacheMode = vfscommon.CacheModeFull - vfs := New(r.Fremote, &opt) - defer cleanup(t, r, vfs) + opt.WriteBack = writeBackDelay + _, vfs, cleanup := newTestVFSOpt(t, &opt) + defer cleanup() for _, test := range openTests { t.Run(test.what, func(t *testing.T) { @@ -668,12 +653,11 @@ func TestRWFileHandleOpenTests(t *testing.T) { // tests mod time on open files func TestRWFileModTimeWithOpenWriters(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() + r, vfs, fh, cleanup := rwHandleCreateWriteOnly(t) + defer cleanup() if !canSetModTime(t, r) { - return + t.Skip("can't set mod time") } - vfs, fh := rwHandleCreateWriteOnly(t, r) mtime := time.Date(2012, time.November, 18, 17, 32, 31, 0, time.UTC) @@ -700,21 +684,21 @@ func TestRWFileModTimeWithOpenWriters(t *testing.T) { } file1 := fstest.NewItem("file1", "hi", mtime) + vfs.WaitForWriters(waitForWritersDelay) fstest.CheckItems(t, r.Fremote, file1) } func TestRWCacheRename(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() + opt := vfscommon.DefaultOpt + opt.CacheMode = vfscommon.CacheModeFull + opt.WriteBack = writeBackDelay + r, vfs, cleanup := newTestVFSOpt(t, &opt) + defer cleanup() if !operations.CanServerSideMove(r.Fremote) { t.Skip("skip as can't rename files") } - opt := vfscommon.DefaultOpt - opt.CacheMode = vfscommon.CacheModeFull - vfs := New(r.Fremote, &opt) - h, err := vfs.OpenFile("rename_me", os.O_WRONLY|os.O_CREATE, 0777) require.NoError(t, err) _, err = h.WriteString("hello") diff --git a/vfs/vfs.go b/vfs/vfs.go index 258722fc5..cc016b927 100644 --- a/vfs/vfs.go +++ b/vfs/vfs.go @@ -265,7 +265,7 @@ func (vfs *VFS) FlushDirCache() { // time.Duration has elapsed func (vfs *VFS) WaitForWriters(timeout time.Duration) { defer log.Trace(nil, "timeout=%v", timeout)("") - const tickTime = 1 * time.Second + tickTime := 10 * time.Millisecond deadline := time.NewTimer(timeout) defer deadline.Stop() tick := time.NewTimer(tickTime) @@ -273,18 +273,26 @@ func (vfs *VFS) WaitForWriters(timeout time.Duration) { tick.Stop() for { writers := vfs.root.countActiveWriters() - if writers == 0 
{ + cacheInUse := 0 + if vfs.cache != nil { + cacheInUse = vfs.cache.TotalInUse() + } + if writers == 0 && cacheInUse == 0 { return } - fs.Debugf(nil, "Still %d writers active, waiting %v", writers, tickTime) + fs.Debugf(nil, "Still %d writers active and %d cache items in use, waiting %v", writers, cacheInUse, tickTime) tick.Reset(tickTime) select { case <-tick.C: break case <-deadline.C: - fs.Errorf(nil, "Exiting even though %d writers are active after %v", writers, timeout) + fs.Errorf(nil, "Exiting even though %d writers active and %d cache items in use after %v\n%s", writers, cacheInUse, timeout, vfs.cache.Dump()) return } + tickTime *= 2 + if tickTime > time.Second { + tickTime = time.Second + } } } diff --git a/vfs/vfs_case_test.go b/vfs/vfs_case_test.go index b20351e88..933187000 100644 --- a/vfs/vfs_case_test.go +++ b/vfs/vfs_case_test.go @@ -36,10 +36,12 @@ func TestCaseSensitivity(t *testing.T) { optCS := vfscommon.DefaultOpt optCS.CaseInsensitive = false vfsCS := New(r.Fremote, &optCS) + defer cleanupVFS(t, vfsCS) optCI := vfscommon.DefaultOpt optCI.CaseInsensitive = true vfsCI := New(r.Fremote, &optCI) + defer cleanupVFS(t, vfsCI) // Run basic checks that must pass on VFS of any type. assertFileDataVFS(t, vfsCI, "FiLeA", "data1") diff --git a/vfs/vfs_test.go b/vfs/vfs_test.go index 7cfc35155..a6c7f7714 100644 --- a/vfs/vfs_test.go +++ b/vfs/vfs_test.go @@ -8,6 +8,7 @@ import ( "io" "os" "testing" + "time" "github.com/pkg/errors" _ "github.com/rclone/rclone/backend/all" // import all the backends @@ -25,11 +26,41 @@ var ( t3 = fstest.Time("2011-12-30T12:59:59.000000000Z") ) +// Constants used in the tests +const ( + writeBackDelay = 100 * time.Millisecond // A short writeback delay for testing + waitForWritersDelay = 10 * time.Second // time to wait for exiting writers +) + // TestMain drives the tests func TestMain(m *testing.M) { fstest.TestMain(m) } +// Clean up a test VFS +func cleanupVFS(t *testing.T, vfs *VFS) { + vfs.WaitForWriters(waitForWritersDelay) + err := vfs.CleanUp() + require.NoError(t, err) + vfs.Shutdown() +} + +// Create a new VFS +func newTestVFSOpt(t *testing.T, opt *vfscommon.Options) (r *fstest.Run, vfs *VFS, cleanup func()) { + r = fstest.NewRun(t) + vfs = New(r.Fremote, opt) + cleanup = func() { + cleanupVFS(t, vfs) + r.Finalise() + } + return r, vfs, cleanup +} + +// Create a new VFS with default options +func newTestVFS(t *testing.T) (r *fstest.Run, vfs *VFS, cleanup func()) { + return newTestVFSOpt(t, nil) +} + // Check baseHandle performs as advertised func TestVFSbaseHandle(t *testing.T) { fh := baseHandle{} @@ -97,31 +128,33 @@ func TestVFSbaseHandle(t *testing.T) { // TestNew sees if the New command works properly func TestVFSNew(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() + r, vfs, cleanup := newTestVFS(t) + defer cleanup() // Check making a VFS with nil options - vfs := New(r.Fremote, nil) var defaultOpt = vfscommon.DefaultOpt defaultOpt.DirPerms |= os.ModeDir assert.Equal(t, vfs.Opt, defaultOpt) assert.Equal(t, vfs.f, r.Fremote) +} - // Check the initialisation +// TestVFSNewWithOpts sees if the New command works properly with options passed in +func TestVFSNewWithOpts(t *testing.T) { var opt = vfscommon.DefaultOpt opt.DirPerms = 0777 opt.FilePerms = 0666 opt.Umask = 0002 - vfs = New(r.Fremote, &opt) + _, vfs, cleanup := newTestVFSOpt(t, &opt) + defer cleanup() + assert.Equal(t, os.FileMode(0775)|os.ModeDir, vfs.Opt.DirPerms) assert.Equal(t, os.FileMode(0664), vfs.Opt.FilePerms) } // TestRoot checks root directory is present and correct func TestVFSRoot(t 
*testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() - vfs := New(r.Fremote, nil) + _, vfs, cleanup := newTestVFS(t) + defer cleanup() root, err := vfs.Root() require.NoError(t, err) @@ -131,9 +164,8 @@ func TestVFSRoot(t *testing.T) { } func TestVFSStat(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() - vfs := New(r.Fremote, nil) + r, vfs, cleanup := newTestVFS(t) + defer cleanup() file1 := r.WriteObject(context.Background(), "file1", "file1 contents", t1) file2 := r.WriteObject(context.Background(), "dir/file2", "file2 contents", t2) @@ -168,9 +200,8 @@ func TestVFSStat(t *testing.T) { } func TestVFSStatParent(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() - vfs := New(r.Fremote, nil) + r, vfs, cleanup := newTestVFS(t) + defer cleanup() file1 := r.WriteObject(context.Background(), "file1", "file1 contents", t1) file2 := r.WriteObject(context.Background(), "dir/file2", "file2 contents", t2) @@ -202,9 +233,8 @@ func TestVFSStatParent(t *testing.T) { } func TestVFSOpenFile(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() - vfs := New(r.Fremote, nil) + r, vfs, cleanup := newTestVFS(t) + defer cleanup() file1 := r.WriteObject(context.Background(), "file1", "file1 contents", t1) file2 := r.WriteObject(context.Background(), "dir/file2", "file2 contents", t2) @@ -238,13 +268,13 @@ func TestVFSOpenFile(t *testing.T) { } func TestVFSRename(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() + r, vfs, cleanup := newTestVFS(t) + defer cleanup() + features := r.Fremote.Features() if features.Move == nil && features.Copy == nil { - return // skip as can't rename files + t.Skip("skip as can't rename files") } - vfs := New(r.Fremote, nil) file1 := r.WriteObject(context.Background(), "dir/file2", "file2 contents", t2) fstest.CheckItems(t, r.Fremote, file1) @@ -267,9 +297,8 @@ func TestVFSRename(t *testing.T) { } func TestVFSStatfs(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() - vfs := New(r.Fremote, nil) + r, vfs, cleanup := newTestVFS(t) + defer cleanup() // pre-conditions assert.Nil(t, vfs.usage) diff --git a/vfs/vfscache/cache.go b/vfs/vfscache/cache.go index 3d1f18021..f8980c360 100644 --- a/vfs/vfscache/cache.go +++ b/vfs/vfscache/cache.go @@ -18,7 +18,6 @@ import ( fscache "github.com/rclone/rclone/fs/cache" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/hash" - "github.com/rclone/rclone/fs/log" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/lib/file" "github.com/rclone/rclone/vfs/vfscommon" @@ -43,6 +42,7 @@ type Cache struct { metaRoot string // root of the cache metadata directory hashType hash.Type // hash to use locally and remotely hashOption *fs.HashesOption // corresponding OpenOption + writeback *writeBack // holds Items for writeback mu sync.Mutex // protects the following variables item map[string]*Item // files/directories in the cache @@ -87,6 +87,7 @@ func New(ctx context.Context, fremote fs.Fs, opt *vfscommon.Options) (*Cache, er item: make(map[string]*Item), hashType: hashType, hashOption: hashOption, + writeback: newWriteBack(ctx, opt), } // Make sure cache directories exist @@ -200,6 +201,20 @@ func (c *Cache) InUse(name string) bool { return item.inUse() } +// DirtyItem returns the Item if it exists in the cache and is Dirty +// +// name should be a remote path not an osPath +func (c *Cache) DirtyItem(name string) (item *Item) { + name = clean(name) + c.mu.Lock() + defer c.mu.Unlock() + item = c.item[name] + if item != nil && !item.IsDirty() { + item = nil + } + return item +} + +// 
get gets a file name from the cache or creates a new one // // It returns the item and found as to whether this item was found in @@ -224,20 +239,14 @@ func (c *Cache) Item(name string) (item *Item) { return item } -// Exists checks to see if the file exists in the cache or not +// Exists checks to see if the file exists in the cache or not. // -// FIXME check the metadata exists here too? +// This is done by bringing the item into the cache which will +// validate the backing file and metadata and then asking if the Item +// exists or not. func (c *Cache) Exists(name string) bool { - osPath := c.toOSPath(name) - fi, err := os.Stat(osPath) - if err != nil { - return false - } - // checks for non-regular files (e.g. directories, symlinks, devices, etc.) - if !fi.Mode().IsRegular() { - return false - } - return true + item, _ := c.get(name) + return item.Exists() } // rename with os.Rename and more checking @@ -300,8 +309,13 @@ func (c *Cache) Rename(name string, newName string, newObj fs.Object) (err error // Remove should be called if name is deleted func (c *Cache) Remove(name string) { - item, _ := c.get(name) + name = clean(name) + c.mu.Lock() + item, _ := c._get(name) + delete(c.item, name) + c.mu.Unlock() item.remove("file deleted") + } // SetModTime should be called to set the modification time of the cache file @@ -506,9 +520,30 @@ func (c *Cache) cleaner(ctx context.Context) { } } -// Check the local file is up to date in the cache -func (c *Cache) Check(ctx context.Context, o fs.Object, remote string) (err error) { - defer log.Trace(o, "remote=%q", remote)("err=%v", &err) - item, _ := c.get(remote) - return item.checkObject(o) +// TotalInUse returns the number of items in the cache which are InUse +func (c *Cache) TotalInUse() (n int) { + c.mu.Lock() + defer c.mu.Unlock() + for _, item := range c.item { + if item.inUse() { + n++ + } + } + return n +} + +// Dump the cache into a string for debugging purposes +func (c *Cache) Dump() string { + if c == nil { + return "Cache: \n" + } + c.mu.Lock() + defer c.mu.Unlock() + var out strings.Builder + out.WriteString("Cache{\n") + for name, item := range c.item { + fmt.Fprintf(&out, "\t%q: %+v,\n", name, item) + } + out.WriteString("}\n") + return out.String() } diff --git a/vfs/vfscache/cache_test.go b/vfs/vfscache/cache_test.go index b9a317093..ed819d3e6 100644 --- a/vfs/vfscache/cache_test.go +++ b/vfs/vfscache/cache_test.go @@ -33,22 +33,59 @@ func itemAsString(c *Cache) []string { return out } -func TestCacheNew(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() +// open an item and write to it +func itemWrite(t *testing.T, item *Item, contents string) { + require.NoError(t, item.Open(nil)) + _, err := item.WriteAt([]byte(contents), 0) + require.NoError(t, err) +} + +func assertPathNotExist(t *testing.T, path string) { + _, err := os.Stat(path) + assert.True(t, os.IsNotExist(err)) +} + +func assertPathExist(t *testing.T, path string) os.FileInfo { + fi, err := os.Stat(path) + assert.NoError(t, err) + return fi +} + +func newTestCacheOpt(t *testing.T, opt vfscommon.Options) (r *fstest.Run, c *Cache, cleanup func()) { + r = fstest.NewRun(t) ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - // FIXME need to be writing to the actual file here - // need a item.WriteAt/item.ReadAt method I think - - // Disable the cache cleaner as it interferes with these tests - opt := vfscommon.DefaultOpt - opt.CachePollInterval = 0 c, err := New(ctx, r.Fremote, &opt) require.NoError(t, err) + cleanup = func() { + err := 
c.CleanUp() + require.NoError(t, err) + assertPathNotExist(t, c.root) + cancel() + r.Finalise() + } + + return r, c, cleanup +} + +func newTestCache(t *testing.T) (r *fstest.Run, c *Cache, cleanup func()) { + opt := vfscommon.DefaultOpt + + // Disable the cache cleaner as it interferes with these tests + opt.CachePollInterval = 0 + + // Enable synchronous write + opt.WriteBack = 0 + + return newTestCacheOpt(t, opt) +} + +func TestCacheNew(t *testing.T) { + r, c, cleanup := newTestCache(t) + defer cleanup() + assert.Contains(t, c.root, "vfs") assert.Contains(t, c.fcache.Root(), filepath.Base(r.Fremote.Root())) assert.Equal(t, []string(nil), itemAsString(c)) @@ -59,8 +96,7 @@ func TestCacheNew(t *testing.T) { assert.Equal(t, "potato", filepath.Base(p)) assert.Equal(t, []string(nil), itemAsString(c)) - fi, err := os.Stat(filepath.Dir(p)) - require.NoError(t, err) + fi := assertPathExist(t, filepath.Dir(p)) assert.True(t, fi.IsDir()) // get @@ -85,13 +121,6 @@ func TestCacheNew(t *testing.T) { require.NoError(t, potato.Truncate(5)) atime := time.Now() potato.info.ATime = atime - // err = ioutil.WriteFile(p, []byte("hello"), 0600) - // require.NoError(t, err) - - // read its atime - - // updateAtimes - //potato.ATime = time.Now().Add(-24 * time.Hour) assert.Equal(t, []string{ `name="potato" opens=1 size=5`, @@ -100,8 +129,7 @@ func TestCacheNew(t *testing.T) { // try purging with file open c.purgeOld(10 * time.Second) - // _, err = os.Stat(p) - // assert.NoError(t, err) + assertPathExist(t, p) // close assert.Equal(t, []string{ @@ -121,35 +149,19 @@ func TestCacheNew(t *testing.T) { // try purging with file closed c.purgeOld(10 * time.Second) - // ...nothing should happend - // _, err = os.Stat(p) - // assert.NoError(t, err) + assertPathExist(t, p) //.. 
purge again with -ve age c.purgeOld(-10 * time.Second) - _, err = os.Stat(p) - assert.True(t, os.IsNotExist(err)) + assertPathNotExist(t, p) // clean - have tested the internals already c.clean() - - // cleanup - err = c.CleanUp() - require.NoError(t, err) - _, err = os.Stat(c.root) - assert.True(t, os.IsNotExist(err)) } func TestCacheOpens(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - c, err := New(ctx, r.Fremote, &vfscommon.DefaultOpt) - require.NoError(t, err) - defer func() { require.NoError(t, c.CleanUp()) }() + _, c, cleanup := newTestCache(t) + defer cleanup() assert.Equal(t, []string(nil), itemAsString(c)) potato := c.Item("potato") @@ -197,18 +209,8 @@ func TestCacheOpens(t *testing.T) { // test the open, mkdir, purge, close, purge sequence func TestCacheOpenMkdir(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // Disable the cache cleaner as it interferes with these tests - opt := vfscommon.DefaultOpt - opt.CachePollInterval = 0 - c, err := New(ctx, r.Fremote, &opt) - require.NoError(t, err) - defer func() { require.NoError(t, c.CleanUp()) }() + _, c, cleanup := newTestCache(t) + defer cleanup() // open potato := c.Item("sub/potato") @@ -227,16 +229,14 @@ func TestCacheOpenMkdir(t *testing.T) { }, itemAsString(c)) // test directory exists - fi, err := os.Stat(filepath.Dir(p)) - require.NoError(t, err) + fi := assertPathExist(t, filepath.Dir(p)) assert.True(t, fi.IsDir()) // clean the cache c.purgeOld(-10 * time.Second) // test directory still exists - fi, err = os.Stat(filepath.Dir(p)) - require.NoError(t, err) + fi = assertPathExist(t, filepath.Dir(p)) assert.True(t, fi.IsDir()) // close @@ -252,39 +252,23 @@ func TestCacheOpenMkdir(t *testing.T) { assert.Equal(t, []string(nil), itemAsString(c)) - // FIXME test directory does not exist - // _, err = os.Stat(filepath.Dir(p)) - // require.True(t, os.IsNotExist(err)) + // test directory does not exist + assertPathNotExist(t, filepath.Dir(p)) } func TestCachePurgeOld(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - c, err := New(ctx, r.Fremote, &vfscommon.DefaultOpt) - require.NoError(t, err) - defer func() { require.NoError(t, c.CleanUp()) }() + _, c, cleanup := newTestCache(t) + defer cleanup() // Test funcs var removed []string - //removedDir := true removeFile := func(item *Item) { removed = append(removed, item.name) item._remove("TestCachePurgeOld") } - // removeDir := func(name string) bool { - // if removedDir { - // removed = append(removed, filepath.ToSlash(name)+"/") - // } - // return removedDir - // } removed = nil c._purgeOld(-10*time.Second, removeFile) - // FIXME c._purgeEmptyDirs(removeDir) assert.Equal(t, []string(nil), removed) potato2 := c.Item("sub/dir2/potato2") @@ -300,9 +284,7 @@ func TestCachePurgeOld(t *testing.T) { }, itemAsString(c)) removed = nil - // removedDir = true c._purgeOld(-10*time.Second, removeFile) - // FIXME c._purgeEmptyDirs(removeDir) assert.Equal(t, []string{ "sub/dir2/potato2", }, removed) @@ -310,9 +292,7 @@ func TestCachePurgeOld(t *testing.T) { require.NoError(t, potato.Close(nil)) removed = nil - // removedDir = true c._purgeOld(-10*time.Second, removeFile) - // FIXME c._purgeEmptyDirs(removeDir) assert.Equal(t, []string(nil), removed) require.NoError(t, potato.Close(nil)) @@ -322,9 +302,7 @@ func 
TestCachePurgeOld(t *testing.T) { }, itemAsString(c)) removed = nil - // removedDir = false c._purgeOld(10*time.Second, removeFile) - // FIXME c._purgeEmptyDirs(removeDir) assert.Equal(t, []string(nil), removed) assert.Equal(t, []string{ @@ -332,9 +310,7 @@ func TestCachePurgeOld(t *testing.T) { }, itemAsString(c)) removed = nil - // removedDir = true c._purgeOld(-10*time.Second, removeFile) - // FIXME c._purgeEmptyDirs(removeDir) assert.Equal(t, []string{ "sub/dir/potato", }, removed) @@ -343,17 +319,8 @@ func TestCachePurgeOld(t *testing.T) { } func TestCachePurgeOverQuota(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // Disable the cache cleaner as it interferes with these tests - opt := vfscommon.DefaultOpt - opt.CachePollInterval = 0 - c, err := New(ctx, r.Fremote, &opt) - require.NoError(t, err) + _, c, cleanup := newTestCache(t) + defer cleanup() // Test funcs var removed []string @@ -376,12 +343,10 @@ func TestCachePurgeOverQuota(t *testing.T) { // Make some test files potato := c.Item("sub/dir/potato") - require.NoError(t, potato.Open(nil)) - require.NoError(t, potato.Truncate(5)) + itemWrite(t, potato, "hello") potato2 := c.Item("sub/dir2/potato2") - require.NoError(t, potato2.Open(nil)) - require.NoError(t, potato2.Truncate(6)) + itemWrite(t, potato2, "hello2") assert.Equal(t, []string{ `name="sub/dir/potato" opens=1 size=5`, @@ -407,7 +372,6 @@ func TestCachePurgeOverQuota(t *testing.T) { // make potato2 definitely after potato t1 := time.Now().Add(10 * time.Second) - require.NoError(t, potato2.Truncate(6)) potato2.info.ATime = t1 // Check only potato removed to get below quota @@ -470,3 +434,210 @@ func TestCachePurgeOverQuota(t *testing.T) { assert.Equal(t, int64(0), c.used) assert.Equal(t, []string(nil), itemAsString(c)) } + +func TestCacheInUse(t *testing.T) { + _, c, cleanup := newTestCache(t) + defer cleanup() + + assert.False(t, c.InUse("potato")) + + potato := c.Item("potato") + + assert.False(t, c.InUse("potato")) + + require.NoError(t, potato.Open(nil)) + + assert.True(t, c.InUse("potato")) + + require.NoError(t, potato.Close(nil)) + + assert.False(t, c.InUse("potato")) +} + +func TestCacheDirtyItem(t *testing.T) { + _, c, cleanup := newTestCache(t) + defer cleanup() + + assert.Nil(t, c.DirtyItem("potato")) + + potato := c.Item("potato") + + assert.Nil(t, c.DirtyItem("potato")) + + require.NoError(t, potato.Open(nil)) + require.NoError(t, potato.Truncate(5)) + + assert.Equal(t, potato, c.DirtyItem("potato")) + + require.NoError(t, potato.Close(nil)) + + assert.Nil(t, c.DirtyItem("potato")) +} + +func TestCacheExistsAndRemove(t *testing.T) { + _, c, cleanup := newTestCache(t) + defer cleanup() + + assert.False(t, c.Exists("potato")) + + potato := c.Item("potato") + + assert.False(t, c.Exists("potato")) + + require.NoError(t, potato.Open(nil)) + + assert.True(t, c.Exists("potato")) + + require.NoError(t, potato.Close(nil)) + + assert.True(t, c.Exists("potato")) + + c.Remove("potato") + + assert.False(t, c.Exists("potato")) + +} + +func TestCacheRename(t *testing.T) { + _, c, cleanup := newTestCache(t) + defer cleanup() + + // setup + + assert.False(t, c.Exists("potato")) + potato := c.Item("potato") + require.NoError(t, potato.Open(nil)) + require.NoError(t, potato.Close(nil)) + assert.True(t, c.Exists("potato")) + + osPath := c.toOSPath("potato") + osPathMeta := c.toOSPathMeta("potato") + assertPathExist(t, osPath) + assertPathExist(t, osPathMeta) + + // rename potato -> 
newPotato + + require.NoError(t, c.Rename("potato", "newPotato", nil)) + assertPathNotExist(t, osPath) + assertPathNotExist(t, osPathMeta) + assert.False(t, c.Exists("potato")) + + osPath = c.toOSPath("newPotato") + osPathMeta = c.toOSPathMeta("newPotato") + assertPathExist(t, osPath) + assertPathExist(t, osPathMeta) + assert.True(t, c.Exists("newPotato")) + + // rename newPotato -> sub/newPotato + + require.NoError(t, c.Rename("newPotato", "sub/newPotato", nil)) + assertPathNotExist(t, osPath) + assertPathNotExist(t, osPathMeta) + assert.False(t, c.Exists("potato")) + + osPath = c.toOSPath("sub/newPotato") + osPathMeta = c.toOSPathMeta("sub/newPotato") + assertPathExist(t, osPath) + assertPathExist(t, osPathMeta) + assert.True(t, c.Exists("sub/newPotato")) + + // remove + + c.Remove("sub/newPotato") + assertPathNotExist(t, osPath) + assertPathNotExist(t, osPathMeta) + assert.False(t, c.Exists("sub/newPotato")) + + // non existent file - is ignored + assert.NoError(t, c.Rename("nonexist", "nonexist2", nil)) +} + +func TestCacheCleaner(t *testing.T) { + opt := vfscommon.DefaultOpt + opt.CachePollInterval = 10 * time.Millisecond + opt.CacheMaxAge = 20 * time.Millisecond + _, c, cleanup := newTestCacheOpt(t, opt) + defer cleanup() + + time.Sleep(2 * opt.CachePollInterval) + + potato := c.Item("potato") + potato2, found := c.get("potato") + assert.Equal(t, potato, potato2) + assert.True(t, found) + + time.Sleep(10 * opt.CachePollInterval) + + potato2, found = c.get("potato") + assert.NotEqual(t, potato, potato2) + assert.False(t, found) +} + +func TestCacheSetModTime(t *testing.T) { + _, c, cleanup := newTestCache(t) + defer cleanup() + + t1 := time.Date(2010, 1, 2, 3, 4, 5, 9, time.UTC) + + potato := c.Item("potato") + require.NoError(t, potato.Open(nil)) + require.NoError(t, potato.Truncate(5)) + require.NoError(t, potato.Close(nil)) + + c.SetModTime("potato", t1) + osPath := potato.c.toOSPath("potato") + fi, err := os.Stat(osPath) + require.NoError(t, err) + + fstest.AssertTimeEqualWithPrecision(t, "potato", t1, fi.ModTime(), time.Second) +} + +func TestCacheTotaInUse(t *testing.T) { + _, c, cleanup := newTestCache(t) + defer cleanup() + + assert.Equal(t, int(0), c.TotalInUse()) + + potato := c.Item("potato") + assert.Equal(t, int(0), c.TotalInUse()) + + require.NoError(t, potato.Open(nil)) + assert.Equal(t, int(1), c.TotalInUse()) + + require.NoError(t, potato.Truncate(5)) + assert.Equal(t, int(1), c.TotalInUse()) + + potato2 := c.Item("potato2") + assert.Equal(t, int(1), c.TotalInUse()) + + require.NoError(t, potato2.Open(nil)) + assert.Equal(t, int(2), c.TotalInUse()) + + require.NoError(t, potato2.Close(nil)) + assert.Equal(t, int(1), c.TotalInUse()) + + require.NoError(t, potato.Close(nil)) + assert.Equal(t, int(0), c.TotalInUse()) +} + +func TestCacheDump(t *testing.T) { + _, c, cleanup := newTestCache(t) + defer cleanup() + + out := (*Cache)(nil).Dump() + assert.Equal(t, "Cache: \n", out) + + out = c.Dump() + assert.Equal(t, "Cache{\n}\n", out) + + c.Item("potato") + + out = c.Dump() + want := "Cache{\n\t\"potato\": " + assert.Equal(t, want, out[:len(want)]) + + c.Remove("potato") + + out = c.Dump() + assert.Equal(t, "Cache{\n}\n", out) +} diff --git a/vfs/vfscache/downloader.go b/vfs/vfscache/downloader.go index ea16b45fd..28530fa00 100644 --- a/vfs/vfscache/downloader.go +++ b/vfs/vfscache/downloader.go @@ -60,11 +60,12 @@ func newDownloader(item *Item, fcache fs.Fs, remote string, src fs.Object) (dl * if err == nil { // do nothing } else if os.IsNotExist(err) { - fs.Debugf(src, 
"creating empty file") - err = item._truncateToCurrentSize() - if err != nil { - return nil, errors.Wrap(err, "newDownloader: failed to create empty file") - } + return nil, errors.New("vfs cache: internal error: newDownloader: called before Item.Open") + // fs.Debugf(src, "creating empty file") + // err = item._truncateToCurrentSize() + // if err != nil { + // return nil, errors.Wrap(err, "newDownloader: failed to create empty file") + // } } else { return nil, errors.Wrap(err, "newDownloader: failed to stat cache file") } diff --git a/vfs/vfscache/item.go b/vfs/vfscache/item.go index aaa848907..5ebf8a48c 100644 --- a/vfs/vfscache/item.go +++ b/vfs/vfscache/item.go @@ -145,6 +145,13 @@ func (item *Item) getATime() time.Time { return item.info.ATime } +// getName returns the name of the item +func (item *Item) getName() string { + item.mu.Lock() + defer item.mu.Unlock() + return item.name +} + // getDiskSize returns the size on disk (approximately) of the item // // We return the sizes of the chunks we have fetched, however there is @@ -374,6 +381,13 @@ func (item *Item) Dirty() { item.mu.Unlock() } +// IsDirty returns true if the item is dirty +func (item *Item) IsDirty() bool { + item.mu.Lock() + defer item.mu.Unlock() + return item.metaDirty || item.info.Dirty +} + // Open the local file from the object passed in (which may be nil) // which implies we are about to create the file func (item *Item) Open(o fs.Object) (err error) { @@ -445,10 +459,9 @@ func (item *Item) Open(o fs.Object) (err error) { // Store stores the local cache file to the remote object, returning // the new remote object. objOld is the old object if known. // -// call with item lock held -func (item *Item) _store() (err error) { +// Call with lock held +func (item *Item) _store(ctx context.Context, storeFn StoreFn) (err error) { defer log.Trace(item.name, "item=%p", item)("err=%v", &err) - ctx := context.Background() // Ensure any segments not transferred are brought in err = item._ensure(0, item.info.Size) @@ -462,23 +475,51 @@ func (item *Item) _store() (err error) { return errors.Wrap(err, "vfs cache: failed to find cache file") } + item.mu.Unlock() o, err := operations.Copy(ctx, item.c.fremote, item.o, item.name, cacheObj) + item.mu.Lock() if err != nil { return errors.Wrap(err, "vfs cache: failed to transfer file from cache to remote") } item.o = o item._updateFingerprint() + item.info.Dirty = false + err = item._save() + if err != nil { + fs.Errorf(item.name, "Failed to write metadata file: %v", err) + } + if storeFn != nil && item.o != nil { + // Write the object back to the VFS layer as last + // thing we do with mutex unlocked + item.mu.Unlock() + storeFn(item.o) + item.mu.Lock() + } return nil } +// Store stores the local cache file to the remote object, returning +// the new remote object. objOld is the old object if known. +func (item *Item) store(ctx context.Context, storeFn StoreFn) (err error) { + item.mu.Lock() + defer item.mu.Unlock() + return item._store(ctx, storeFn) +} + // Close the cache file func (item *Item) Close(storeFn StoreFn) (err error) { defer log.Trace(item.o, "Item.Close")("err=%v", &err) var ( - downloader *downloader - o fs.Object + downloader *downloader + syncWriteBack = item.c.opt.WriteBack <= 0 ) - // close downloader and set item with mutex unlocked + // FIXME need to unlock to kill downloader - should we + // re-arrange locking so this isn't necessary? maybe + // downloader should use the item mutex for locking? or put a + // finer lock on Rs? 
+ // + // close downloader with mutex unlocked + // downloader.Write calls ensure which needs the lock defer func() { if downloader != nil { closeErr := downloader.close(nil) @@ -486,9 +527,11 @@ func (item *Item) Close(storeFn StoreFn) (err error) { err = closeErr } } - if err == nil && storeFn != nil && o != nil { - // Write the object back to the VFS layer - storeFn(item.o) + // save the metadata once more since it may be dirty + // after the downloader + saveErr := item._save() + if saveErr != nil && err == nil { + err = errors.Wrap(saveErr, "close failed to save item") } }() item.mu.Lock() @@ -533,15 +576,14 @@ func (item *Item) Close(storeFn StoreFn) (err error) { // upload the file to backing store if changed if item.info.Dirty { - fs.Debugf(item.name, "item changed - writeback") - err = item._store() - if err != nil { - fs.Errorf(item.name, "%v", err) - return err + fs.Debugf(item.name, "item changed - writeback in %v", item.c.opt.WriteBack) + if syncWriteBack { + // do synchronous writeback + err = item._store(context.Background(), storeFn) + } else { + // asynchronous writeback + item.c.writeback.add(item, storeFn) } - fs.Debugf(item.o, "transferred to remote") - item.info.Dirty = false - o = item.o } return err @@ -590,16 +632,6 @@ func (item *Item) _checkObject(o fs.Object) error { return nil } -// check the fingerprint of an object and update the item or delete -// the cached file accordingly. -// -// It ensures the file is the correct size for the object -func (item *Item) checkObject(o fs.Object) error { - item.mu.Lock() - defer item.mu.Unlock() - return item._checkObject(o) -} - // remove the cached file // // call with lock held @@ -760,6 +792,10 @@ func (item *Item) setModTime(modTime time.Time) { item.mu.Lock() item._updateFingerprint() item._setModTime(modTime) + err := item._save() + if err != nil { + fs.Errorf(item.name, "vfs cache: setModTime: failed to save item info: %v", err) + } item.mu.Unlock() } @@ -796,6 +832,10 @@ func (item *Item) WriteAt(b []byte, off int64) (n int, err error) { if n > 0 { item._dirty() } + end := off + int64(n) + if end > item.info.Size { + item.info.Size = end + } item.mu.Unlock() return n, err } diff --git a/vfs/vfscache/writeback.go b/vfs/vfscache/writeback.go new file mode 100644 index 000000000..846f082e2 --- /dev/null +++ b/vfs/vfscache/writeback.go @@ -0,0 +1,311 @@ +// This keeps track of the files which need to be written back + +package vfscache + +import ( + "container/heap" + "context" + "sync" + "time" + + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/vfs/vfscommon" ) + +const ( + uploadDelay = 10 * time.Second // delay between upload attempts + maxUploadAttempts = 10 // max number of times to try to upload +) + +// writeBack keeps track of the items which need to be written back to the remote at some point +type writeBack struct { + mu sync.Mutex + items writeBackItems // priority queue of *writeBackItem + lookup map[*Item]*writeBackItem // for getting a *writeBackItem from a *Item + opt *vfscommon.Options // VFS options + timer *time.Timer // next scheduled time for the uploader + kick chan struct{} // send on this channel to wake up the uploader + uploads int // number of uploads in progress +} + +// make a new writeBack +// +// cancel the context to stop the background goroutine +func newWriteBack(ctx context.Context, opt *vfscommon.Options) *writeBack { + wb := &writeBack{ + items: writeBackItems{}, + lookup: make(map[*Item]*writeBackItem), + opt: opt, + timer: time.NewTimer(time.Second), + kick: make(chan 
struct{}, 1), + } + wb.timer.Stop() + heap.Init(&wb.items) + go wb.uploader(ctx) + return wb +} + +// writeBackItem stores an Item awaiting writeback +// +// writeBack.mu must be held to manipulate this +type writeBackItem struct { + index int // index into the priority queue for update + item *Item // Item that needs writeback + expiry time.Time // When this expires we will write it back + uploading bool // If we are uploading the item + cancel context.CancelFunc // To cancel the upload with + storeFn StoreFn // To write the object back with + tries int // number of times we have tried to upload + delay time.Duration // delay between upload attempts +} + +// A writeBackItems implements a priority queue by implementing +// heap.Interface and holds writeBackItems. +type writeBackItems []*writeBackItem + +func (ws writeBackItems) Len() int { return len(ws) } + +func (ws writeBackItems) Less(i, j int) bool { + return ws[i].expiry.Sub(ws[j].expiry) < 0 +} + +func (ws writeBackItems) Swap(i, j int) { + ws[i], ws[j] = ws[j], ws[i] + ws[i].index = i + ws[j].index = j +} + +func (ws *writeBackItems) Push(x interface{}) { + n := len(*ws) + item := x.(*writeBackItem) + item.index = n + *ws = append(*ws, item) +} + +func (ws *writeBackItems) Pop() interface{} { + old := *ws + n := len(old) + item := old[n-1] + old[n-1] = nil // avoid memory leak + item.index = -1 // for safety + *ws = old[0 : n-1] + return item +} + +// update modifies the expiry of an Item in the queue. +// +// call with lock held +func (ws *writeBackItems) _update(item *writeBackItem, expiry time.Time) { + item.expiry = expiry + heap.Fix(ws, item.index) +} + +// return a new expiry time based from now until the WriteBack timeout +// +// call with lock held +func (wb *writeBack) _newExpiry() time.Time { + expiry := time.Now() + if wb.opt.WriteBack > 0 { + expiry = expiry.Add(wb.opt.WriteBack) + } + return expiry +} + +// make a new writeBackItem +// +// call with the lock held +func (wb *writeBack) _newItem(item *Item) *writeBackItem { + wbItem := &writeBackItem{ + item: item, + expiry: wb._newExpiry(), + delay: uploadDelay, + } + wb._addItem(wbItem) + wb._pushItem(wbItem) + return wbItem +} + +// add a writeBackItem to the lookup map +// +// call with the lock held +func (wb *writeBack) _addItem(wbItem *writeBackItem) { + wb.lookup[wbItem.item] = wbItem +} + +// delete a writeBackItem from the lookup map +// +// call with the lock held +func (wb *writeBack) _delItem(wbItem *writeBackItem) { + delete(wb.lookup, wbItem.item) +} + +// pop a writeBackItem from the items heap +// +// call with the lock held +func (wb *writeBack) _popItem() (wbItem *writeBackItem) { + return heap.Pop(&wb.items).(*writeBackItem) +} + +// push a writeBackItem onto the items heap +// +// call with the lock held +func (wb *writeBack) _pushItem(wbItem *writeBackItem) { + heap.Push(&wb.items, wbItem) +} + +// peek the oldest writeBackItem - may be nil +// +// call with the lock held +func (wb *writeBack) _peekItem() (wbItem *writeBackItem) { + if len(wb.items) == 0 { + return nil + } + return wb.items[0] +} + +// reset the timer which runs the expiries +func (wb *writeBack) _resetTimer() { + wbItem := wb._peekItem() + if wbItem == nil { + wb.timer.Stop() + } else { + dt := time.Until(wbItem.expiry) + if dt < 0 { + dt = 0 + } + wb.timer.Reset(dt) + } +} + +// add adds an item to the writeback queue or resets its timer if it +// is already there +func (wb *writeBack) add(item *Item, storeFn StoreFn) { + wb.mu.Lock() + defer wb.mu.Unlock() + + wbItem, ok := 
+ +// add adds an item to the writeback queue or resets its timer if it +// is already there +func (wb *writeBack) add(item *Item, storeFn StoreFn) { + wb.mu.Lock() + defer wb.mu.Unlock() + + wbItem, ok := wb.lookup[item] + if !ok { + wbItem = wb._newItem(item) + } else { + if wbItem.uploading { + // We are uploading already so cancel the upload + wb._cancelUpload(wbItem) + } + // Kick the timer on + wb.items._update(wbItem, wb._newExpiry()) + } + wbItem.storeFn = storeFn + wb._resetTimer() +} + +// kick the upload checker +// +// This should be called at the end of uploads just in case we had to +// pause uploads because max items was exceeded +// +// call with the lock held +func (wb *writeBack) _kickUploader() { + select { + case wb.kick <- struct{}{}: + default: + } +} + +// upload the item - called as a goroutine +func (wb *writeBack) upload(ctx context.Context, wbItem *writeBackItem) { + wb.mu.Lock() + defer wb.mu.Unlock() + item := wbItem.item + wbItem.tries++ + + wb.mu.Unlock() + err := item.store(ctx, wbItem.storeFn) + wb.mu.Lock() + + wbItem.cancel() // cancel context to release resources since store done + if wbItem.uploading { + wbItem.uploading = false + wb.uploads-- + } + + if err != nil { + if wbItem.tries < maxUploadAttempts { + fs.Errorf(item.getName(), "vfs cache: failed to upload, will retry in %v: %v", wbItem.delay, err) + // push the item back on the queue for retry + wb._pushItem(wbItem) + wb.items._update(wbItem, time.Now().Add(wbItem.delay)) + wbItem.delay *= 2 + } else { + fs.Errorf(item.getName(), "vfs cache: failed to upload, giving up after %d attempts: %v", wbItem.tries, err) + } + } else { + // show that we are done with the item + wb._delItem(wbItem) + } + wb._kickUploader() +} + +// cancel the upload +// +// call with lock held +func (wb *writeBack) _cancelUpload(wbItem *writeBackItem) { + if !wbItem.uploading { + return + } + fs.Debugf(wbItem.item.getName(), "vfs cache: canceling upload") + if wbItem.cancel != nil { + // Cancel the upload - this may or may not be effective + // we don't wait for the completion + wbItem.cancel() + } + if wbItem.uploading { + wbItem.uploading = false + wb.uploads-- + } + // uploading items are not on the heap so add them back + wb._pushItem(wbItem) +} + +// this uploads as many items as possible +func (wb *writeBack) processItems(ctx context.Context) { + wb.mu.Lock() + defer wb.mu.Unlock() + + resetTimer := false + for wbItem := wb._peekItem(); wbItem != nil && time.Until(wbItem.expiry) <= 0; wbItem = wb._peekItem() { + // If reached transfer limit don't restart the timer + if wb.uploads >= fs.Config.Transfers { + fs.Debugf(wbItem.item.getName(), "vfs cache: delaying writeback as --transfers exceeded") + resetTimer = false + break + } + resetTimer = true + // Pop the item, mark as uploading and start the uploader + wbItem = wb._popItem() + wbItem.uploading = true + wb.uploads++ + newCtx, cancel := context.WithCancel(ctx) + wbItem.cancel = cancel + go wb.upload(newCtx, wbItem) + } + + if resetTimer { + wb._resetTimer() + } +} + +// Looks for items which need writing back and write them back until +// the context is cancelled +func (wb *writeBack) uploader(ctx context.Context) { + for { + select { + case <-ctx.Done(): + wb.timer.Stop() + return + case <-wb.timer.C: + wb.processItems(ctx) + case <-wb.kick: + wb.processItems(ctx) + } + } +}
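When an upload fails, the item is pushed back on the queue with its per-item delay and that delay is doubled, so retries back off exponentially from uploadDelay until maxUploadAttempts is reached; a successful upload removes the item from the lookup map instead. The sketch below just prints the retry schedule those constants imply (standalone code, not part of the patch):

```go
package main

import (
	"fmt"
	"time"
)

const (
	uploadDelay       = 10 * time.Second // first retry delay, as in the patch
	maxUploadAttempts = 10               // number of tries before giving up
)

func main() {
	delay := uploadDelay
	var total time.Duration
	for try := 1; try < maxUploadAttempts; try++ {
		fmt.Printf("attempt %2d failed - retrying in %v\n", try, delay)
		total += delay
		delay *= 2 // mirrors wbItem.delay *= 2 after each failed attempt
	}
	fmt.Printf("attempt %2d failed - giving up after %v of retries\n", maxUploadAttempts, total)
}
```

Note also that _kickUploader sends on the buffered kick channel inside a select with a default case, so waking the uploader never blocks while wb.mu is held.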
diff --git a/vfs/vfscommon/options.go b/vfs/vfscommon/options.go index 24c99b0cd..047888753 100644 --- a/vfs/vfscommon/options.go +++ b/vfs/vfscommon/options.go @@ -30,6 +30,7 @@ type Options struct { CaseInsensitive bool WriteWait time.Duration // time to wait for in-sequence write ReadWait time.Duration // time to wait for in-sequence read + WriteBack time.Duration // time to wait before writing back dirty files } // DefaultOpt is the default values uses for Opt @@ -54,4 +55,5 @@ var DefaultOpt = Options{ CaseInsensitive: runtime.GOOS == "windows" || runtime.GOOS == "darwin", // default to true on Windows and Mac, false otherwise WriteWait: 1000 * time.Millisecond, ReadWait: 20 * time.Millisecond, + WriteBack: 5 * time.Second, } diff --git a/vfs/vfsflags/vfsflags.go b/vfs/vfsflags/vfsflags.go index f7c452cc6..54a4d213c 100644 --- a/vfs/vfsflags/vfsflags.go +++ b/vfs/vfsflags/vfsflags.go @@ -35,5 +35,6 @@ func AddFlags(flagSet *pflag.FlagSet) { flags.BoolVarP(flagSet, &Opt.CaseInsensitive, "vfs-case-insensitive", "", Opt.CaseInsensitive, "If a file name not found, find a case insensitive match.") flags.DurationVarP(flagSet, &Opt.WriteWait, "vfs-write-wait", "", Opt.WriteWait, "Time to wait for in-sequence write before giving error.") flags.DurationVarP(flagSet, &Opt.ReadWait, "vfs-read-wait", "", Opt.ReadWait, "Time to wait for in-sequence read before seeking.") + flags.DurationVarP(flagSet, &Opt.WriteBack, "vfs-writeback", "", Opt.WriteBack, "Time to writeback files after last use when using cache.") platformFlags(flagSet) } diff --git a/vfs/vfstest/fs.go b/vfs/vfstest/fs.go index 97c0786e2..b4ac33537 100644 --- a/vfs/vfstest/fs.go +++ b/vfs/vfstest/fs.go @@ -46,17 +46,25 @@ var ( func RunTests(t *testing.T, useVFS bool, fn MountFn) { mountFn = fn flag.Parse() - cacheModes := []vfscommon.CacheMode{ - vfscommon.CacheModeOff, - vfscommon.CacheModeMinimal, - vfscommon.CacheModeWrites, - vfscommon.CacheModeFull, + tests := []struct { + cacheMode vfscommon.CacheMode + writeBack time.Duration + }{ + {cacheMode: vfscommon.CacheModeOff}, + {cacheMode: vfscommon.CacheModeMinimal}, + {cacheMode: vfscommon.CacheModeWrites}, + {cacheMode: vfscommon.CacheModeFull}, + {cacheMode: vfscommon.CacheModeFull, writeBack: 100 * time.Millisecond}, + } run = newRun(useVFS) - for _, cacheMode := range cacheModes { - run.cacheMode(cacheMode) - log.Printf("Starting test run with cache mode %v", cacheMode) - ok := t.Run(fmt.Sprintf("CacheMode=%v", cacheMode), func(t *testing.T) { + for _, test := range tests { + run.cacheMode(test.cacheMode, test.writeBack) + what := fmt.Sprintf("CacheMode=%v", test.cacheMode) + if test.writeBack > 0 { + what += fmt.Sprintf(",WriteBack=%v", test.writeBack) + } + log.Printf("Starting test run with %s", what) + ok := t.Run(what, func(t *testing.T) { t.Run("TestTouchAndDelete", TestTouchAndDelete) t.Run("TestRenameOpenHandle", TestRenameOpenHandle) t.Run("TestDirLs", TestDirLs) @@ -84,7 +92,7 @@ func RunTests(t *testing.T, useVFS bool, fn MountFn) { t.Run("TestWriteFileDup", TestWriteFileDup) t.Run("TestWriteFileAppend", TestWriteFileAppend) }) - log.Printf("Finished test run with cache mode %v (ok=%v)", cacheMode, ok) + log.Printf("Finished test run with %s (ok=%v)", what, ok) if !ok { break } @@ -218,8 +226,8 @@ func (r *Run) umount() { } } -// cacheMode flushes the VFS and changes the CacheMode -func (r *Run) cacheMode(cacheMode vfscommon.CacheMode) { +// cacheMode flushes the VFS and changes the CacheMode and the writeBack time +func (r *Run) cacheMode(cacheMode vfscommon.CacheMode, writeBack time.Duration) { if r.skip { log.Printf("FUSE not found so skipping cacheMode") return @@ -239,6 +247,7 @@ func (r *Run) cacheMode(cacheMode vfscommon.CacheMode) { } // Reset the cache mode r.vfs.SetCacheMode(cacheMode) + r.vfs.Opt.WriteBack = writeBack // Flush the directory cache r.vfs.FlushDirCache() diff --git a/vfs/write_test.go b/vfs/write_test.go index c5f3ee434..431ae084e 100644
--- a/vfs/write_test.go +++ b/vfs/write_test.go @@ -17,21 +17,20 @@ import ( ) // Open a file for write -func writeHandleCreate(t *testing.T, r *fstest.Run) (*VFS, *WriteFileHandle) { - vfs := New(r.Fremote, nil) +func writeHandleCreate(t *testing.T) (r *fstest.Run, vfs *VFS, fh *WriteFileHandle, cleanup func()) { + r, vfs, cleanup = newTestVFS(t) h, err := vfs.OpenFile("file1", os.O_WRONLY|os.O_CREATE, 0777) require.NoError(t, err) fh, ok := h.(*WriteFileHandle) require.True(t, ok) - return vfs, fh + return r, vfs, fh, cleanup } func TestWriteFileHandleMethods(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() - vfs, fh := writeHandleCreate(t, r) + r, vfs, fh, cleanup := writeHandleCreate(t) + defer cleanup() // String assert.Equal(t, "file1 (w)", fh.String()) @@ -133,9 +132,8 @@ func TestWriteFileHandleMethods(t *testing.T) { } func TestWriteFileHandleWriteAt(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() - vfs, fh := writeHandleCreate(t, r) + r, vfs, fh, cleanup := writeHandleCreate(t) + defer cleanup() // Preconditions assert.Equal(t, int64(0), fh.offset) @@ -179,9 +177,8 @@ func TestWriteFileHandleWriteAt(t *testing.T) { } func TestWriteFileHandleFlush(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() - vfs, fh := writeHandleCreate(t, r) + _, vfs, fh, cleanup := writeHandleCreate(t) + defer cleanup() // Check Flush already creates file for unwritten handles, without closing it err := fh.Flush() @@ -213,9 +210,8 @@ func TestWriteFileHandleFlush(t *testing.T) { } func TestWriteFileHandleRelease(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() - _, fh := writeHandleCreate(t, r) + _, _, fh, cleanup := writeHandleCreate(t) + defer cleanup() // Check Release closes file err := fh.Release() @@ -262,12 +258,12 @@ func canSetModTime(t *testing.T, r *fstest.Run) bool { // tests mod time on open files func TestWriteFileModTimeWithOpenWriters(t *testing.T) { - r := fstest.NewRun(t) - defer r.Finalise() + r, vfs, fh, cleanup := writeHandleCreate(t) + defer cleanup() + if !canSetModTime(t, r) { - return + t.Skip("can't set mod time") } - vfs, fh := writeHandleCreate(t, r) mtime := time.Date(2012, time.November, 18, 17, 32, 31, 0, time.UTC) @@ -290,9 +286,8 @@ func TestWriteFileModTimeWithOpenWriters(t *testing.T) { } func testFileReadAt(t *testing.T, n int) { - r := fstest.NewRun(t) - defer r.Finalise() - vfs, fh := writeHandleCreate(t, r) + _, vfs, fh, cleanup := writeHandleCreate(t) + defer cleanup() contents := []byte(random.String(n)) if n != 0 {