vfs: add --vfs-writeback option to delay writes back to cloud storage

This is enabled by default and can be disabled with --vfs-writeback 0
Nick Craig-Wood 2020-04-17 11:18:58 +01:00
parent 28255f1bac
commit e4e53a2e61
19 changed files with 958 additions and 385 deletions
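Before the per-file diffs, a rough sketch (not part of this commit) of what the new option means for a caller of the VFS: with a cache mode that buffers writes, Close returns once the data is safe in the local cache and the upload to the remote happens roughly opt.WriteBack later, so callers that need the remote to be current wait for the writers first. The API names below all appear in the diff; the file name, cache mode and timing values are illustrative only.

package vfs_test // hypothetical illustration, not part of the commit

import (
	"log"
	"os"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/vfs"
	"github.com/rclone/rclone/vfs/vfscommon"
)

// demoWriteBack sketches the intended flow: Close returns once the data is
// safe in the local cache, and the upload to fremote happens asynchronously
// about opt.WriteBack later.
func demoWriteBack(fremote fs.Fs) {
	opt := vfscommon.DefaultOpt
	opt.CacheMode = vfscommon.CacheModeWrites // any mode that buffers writes in the cache
	opt.WriteBack = 5 * time.Second           // the delay added by this commit (--vfs-writeback)

	v := vfs.New(fremote, &opt)

	fh, err := v.OpenFile("file.txt", os.O_WRONLY|os.O_CREATE, 0777)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := fh.WriteString("hello"); err != nil {
		log.Fatal(err)
	}
	if err := fh.Close(); err != nil { // data is now in the cache, not necessarily on fremote
		log.Fatal(err)
	}

	// The upload happens in the background; wait for it before checking fremote.
	v.WaitForWriters(10 * time.Second)
}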

View File

@@ -12,9 +12,8 @@ import (
 )
 func TestDirHandleMethods(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
-	_, dir, _ := dirCreate(t, r)
+	_, _, dir, _, cleanup := dirCreate(t)
+	defer cleanup()
 	h, err := dir.Open(os.O_RDONLY)
 	require.NoError(t, err)
@@ -40,9 +39,8 @@ func TestDirHandleMethods(t *testing.T) {
 }
 func TestDirHandleReaddir(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
-	vfs := New(r.Fremote, nil)
+	r, vfs, cleanup := newTestVFS(t)
+	defer cleanup()
 	file1 := r.WriteObject(context.Background(), "dir/file1", "file1 contents", t1)
 	file2 := r.WriteObject(context.Background(), "dir/file2", "file2- contents", t2)
@@ -96,9 +94,8 @@ func TestDirHandleReaddir(t *testing.T) {
 }
 func TestDirHandleReaddirnames(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
-	_, dir, _ := dirCreate(t, r)
+	_, _, dir, _, cleanup := dirCreate(t)
+	defer cleanup()
 	fh, err := dir.Open(os.O_RDONLY)
 	require.NoError(t, err)

View File

@@ -14,8 +14,8 @@ import (
 	"github.com/stretchr/testify/require"
 )
-func dirCreate(t *testing.T, r *fstest.Run) (*VFS, *Dir, fstest.Item) {
-	vfs := New(r.Fremote, nil)
+func dirCreate(t *testing.T) (r *fstest.Run, vfs *VFS, dir *Dir, item fstest.Item, cleanup func()) {
+	r, vfs, cleanup = newTestVFS(t)
 	file1 := r.WriteObject(context.Background(), "dir/file1", "file1 contents", t1)
 	fstest.CheckItems(t, r.Fremote, file1)
@@ -24,13 +24,12 @@ func dirCreate(t *testing.T, r *fstest.Run) (*VFS, *Dir, fstest.Item) {
 	require.NoError(t, err)
 	require.True(t, node.IsDir())
-	return vfs, node.(*Dir), file1
+	return r, vfs, node.(*Dir), file1, cleanup
 }
 func TestDirMethods(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
-	vfs, dir, _ := dirCreate(t, r)
+	_, vfs, dir, _, cleanup := dirCreate(t)
+	defer cleanup()
 	// String
 	assert.Equal(t, "dir/", dir.String())
@@ -81,9 +80,8 @@ func TestDirMethods(t *testing.T) {
 }
 func TestDirForgetAll(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
-	vfs, dir, file1 := dirCreate(t, r)
+	_, vfs, dir, file1, cleanup := dirCreate(t)
+	defer cleanup()
 	// Make sure / and dir are in cache
 	_, err := vfs.Stat(file1.Path)
@@ -110,9 +108,8 @@ func TestDirForgetAll(t *testing.T) {
 }
 func TestDirForgetPath(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
-	vfs, dir, file1 := dirCreate(t, r)
+	_, vfs, dir, file1, cleanup := dirCreate(t)
+	defer cleanup()
 	// Make sure / and dir are in cache
 	_, err := vfs.Stat(file1.Path)
@@ -143,9 +140,8 @@ func TestDirForgetPath(t *testing.T) {
 }
 func TestDirWalk(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
-	vfs, _, file1 := dirCreate(t, r)
+	r, vfs, _, file1, cleanup := dirCreate(t)
+	defer cleanup()
 	file2 := r.WriteObject(context.Background(), "fil/a/b/c", "super long file", t1)
 	fstest.CheckItems(t, r.Fremote, file1, file2)
@@ -213,9 +209,8 @@ func TestDirWalk(t *testing.T) {
 }
 func TestDirSetModTime(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
-	vfs, dir, _ := dirCreate(t, r)
+	_, vfs, dir, _, cleanup := dirCreate(t)
+	defer cleanup()
 	err := dir.SetModTime(t1)
 	require.NoError(t, err)
@@ -231,9 +226,8 @@ func TestDirSetModTime(t *testing.T) {
 }
 func TestDirStat(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
-	_, dir, _ := dirCreate(t, r)
+	_, _, dir, _, cleanup := dirCreate(t)
+	defer cleanup()
 	node, err := dir.Stat("file1")
 	require.NoError(t, err)
@@ -258,9 +252,8 @@ func checkListing(t *testing.T, dir *Dir, want []string) {
 }
 func TestDirReadDirAll(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
-	vfs := New(r.Fremote, nil)
+	r, vfs, cleanup := newTestVFS(t)
+	defer cleanup()
 	file1 := r.WriteObject(context.Background(), "dir/file1", "file1 contents", t1)
 	file2 := r.WriteObject(context.Background(), "dir/file2", "file2- contents", t2)
@@ -287,9 +280,8 @@ func TestDirReadDirAll(t *testing.T) {
 }
 func TestDirOpen(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
-	_, dir, _ := dirCreate(t, r)
+	_, _, dir, _, cleanup := dirCreate(t)
+	defer cleanup()
 	fd, err := dir.Open(os.O_RDONLY)
 	require.NoError(t, err)
@@ -302,9 +294,8 @@ func TestDirOpen(t *testing.T) {
 }
 func TestDirCreate(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
-	vfs, dir, _ := dirCreate(t, r)
+	_, vfs, dir, _, cleanup := dirCreate(t)
+	defer cleanup()
 	file, err := dir.Create("potato", os.O_WRONLY|os.O_CREATE)
 	require.NoError(t, err)
@@ -336,9 +327,8 @@ func TestDirCreate(t *testing.T) {
 }
 func TestDirMkdir(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
-	vfs, dir, file1 := dirCreate(t, r)
+	r, vfs, dir, file1, cleanup := dirCreate(t)
+	defer cleanup()
 	_, err := dir.Mkdir("file1")
 	assert.Error(t, err)
@@ -359,9 +349,8 @@ func TestDirMkdir(t *testing.T) {
 }
 func TestDirMkdirSub(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
-	vfs, dir, file1 := dirCreate(t, r)
+	r, vfs, dir, file1, cleanup := dirCreate(t)
+	defer cleanup()
 	_, err := dir.Mkdir("file1")
 	assert.Error(t, err)
@@ -386,9 +375,8 @@ func TestDirMkdirSub(t *testing.T) {
 }
 func TestDirRemove(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
-	vfs, dir, _ := dirCreate(t, r)
+	r, vfs, dir, _, cleanup := dirCreate(t)
+	defer cleanup()
 	// check directory is there
 	node, err := vfs.Stat("dir")
@@ -427,9 +415,8 @@ func TestDirRemove(t *testing.T) {
 }
 func TestDirRemoveAll(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
-	vfs, dir, _ := dirCreate(t, r)
+	r, vfs, dir, _, cleanup := dirCreate(t)
+	defer cleanup()
 	// Remove the directory and contents
 	err := dir.RemoveAll()
@@ -450,9 +437,8 @@ func TestDirRemoveAll(t *testing.T) {
 }
 func TestDirRemoveName(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
-	vfs, dir, _ := dirCreate(t, r)
+	r, vfs, dir, _, cleanup := dirCreate(t)
+	defer cleanup()
 	err := dir.RemoveName("file1")
 	require.NoError(t, err)
@@ -471,15 +457,14 @@ func TestDirRemoveName(t *testing.T) {
 }
 func TestDirRename(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
+	r, vfs, dir, file1, cleanup := dirCreate(t)
+	defer cleanup()
 	features := r.Fremote.Features()
 	if features.DirMove == nil && features.Move == nil && features.Copy == nil {
-		return // skip as can't rename directories
+		t.Skip("can't rename directories")
 	}
-	vfs, dir, file1 := dirCreate(t, r)
 	file3 := r.WriteObject(context.Background(), "dir/file3", "file3 contents!", t1)
 	fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{file1, file3}, []string{"dir"}, r.Fremote.Precision())

View File

@@ -325,6 +325,18 @@ func (f *File) Size() int64 {
 	f.mu.RLock()
 	defer f.mu.RUnlock()
+	// Read the size from a dirty item if it exists
+	if f.d.vfs.Opt.CacheMode >= vfscommon.CacheModeMinimal {
+		if item := f.d.vfs.cache.DirtyItem(f._path()); item != nil {
+			size, err := item.GetSize()
+			if err != nil {
+				fs.Errorf(f._path(), "Size: Item GetSize failed: %v", err)
+			} else {
+				return size
+			}
+		}
+	}
 	// if o is nil it isn't valid yet or there are writers, so return the size so far
 	if f._writingInProgress() {
 		return atomic.LoadInt64(&f.size)

View File

@@ -16,10 +16,11 @@ import (
 	"github.com/stretchr/testify/require"
 )
-func fileCreate(t *testing.T, r *fstest.Run, mode vfscommon.CacheMode) (*VFS, *File, fstest.Item) {
+func fileCreate(t *testing.T, mode vfscommon.CacheMode) (r *fstest.Run, vfs *VFS, fh *File, item fstest.Item, cleanup func()) {
 	opt := vfscommon.DefaultOpt
 	opt.CacheMode = mode
-	vfs := New(r.Fremote, &opt)
+	opt.WriteBack = writeBackDelay
+	r, vfs, cleanup = newTestVFSOpt(t, &opt)
 	file1 := r.WriteObject(context.Background(), "dir/file1", "file1 contents", t1)
 	fstest.CheckItems(t, r.Fremote, file1)
@@ -28,13 +29,12 @@ func fileCreate(t *testing.T, r *fstest.Run, mode vfscommon.CacheMode) (*VFS, *F
 	require.NoError(t, err)
 	require.True(t, node.Mode().IsRegular())
-	return vfs, node.(*File), file1
+	return r, vfs, node.(*File), file1, cleanup
 }
 func TestFileMethods(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
-	vfs, file, _ := fileCreate(t, r, vfscommon.CacheModeOff)
+	r, vfs, file, _, cleanup := fileCreate(t, vfscommon.CacheModeOff)
+	defer cleanup()
 	// String
 	assert.Equal(t, "dir/file1", file.String())
@@ -88,12 +88,11 @@ func TestFileMethods(t *testing.T) {
 }
 func TestFileSetModTime(t *testing.T) {
-	r := fstest.NewRun(t)
+	r, vfs, file, file1, cleanup := fileCreate(t, vfscommon.CacheModeOff)
+	defer cleanup()
 	if !canSetModTime(t, r) {
-		return
+		t.Skip("can't set mod time")
 	}
-	defer r.Finalise()
-	vfs, file, file1 := fileCreate(t, r, vfscommon.CacheModeOff)
 	err := file.SetModTime(t2)
 	require.NoError(t, err)
@@ -118,9 +117,8 @@ func fileCheckContents(t *testing.T, file *File) {
 }
 func TestFileOpenRead(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
-	_, file, _ := fileCreate(t, r, vfscommon.CacheModeOff)
+	_, _, file, _, cleanup := fileCreate(t, vfscommon.CacheModeOff)
+	defer cleanup()
 	fileCheckContents(t, file)
 }
@@ -146,6 +144,7 @@ func TestFileOpenReadUnknownSize(t *testing.T) {
 	// create a VFS from that mockfs
 	vfs := New(f, nil)
+	defer cleanupVFS(t, vfs)
 	// find the file
 	node, err := vfs.Stat(remote)
@@ -171,9 +170,8 @@ func TestFileOpenReadUnknownSize(t *testing.T) {
 }
 func TestFileOpenWrite(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
-	vfs, file, _ := fileCreate(t, r, vfscommon.CacheModeOff)
+	_, vfs, file, _, cleanup := fileCreate(t, vfscommon.CacheModeOff)
+	defer cleanup()
 	fd, err := file.openWrite(os.O_WRONLY | os.O_TRUNC)
 	require.NoError(t, err)
@@ -192,9 +190,8 @@ func TestFileOpenWrite(t *testing.T) {
 }
 func TestFileRemove(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
-	vfs, file, _ := fileCreate(t, r, vfscommon.CacheModeOff)
+	r, vfs, file, _, cleanup := fileCreate(t, vfscommon.CacheModeOff)
+	defer cleanup()
 	err := file.Remove()
 	require.NoError(t, err)
@@ -207,9 +204,8 @@ func TestFileRemove(t *testing.T) {
 }
 func TestFileRemoveAll(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
-	vfs, file, _ := fileCreate(t, r, vfscommon.CacheModeOff)
+	r, vfs, file, _, cleanup := fileCreate(t, vfscommon.CacheModeOff)
+	defer cleanup()
 	err := file.RemoveAll()
 	require.NoError(t, err)
@@ -222,9 +218,8 @@ func TestFileRemoveAll(t *testing.T) {
 }
 func TestFileOpen(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
-	_, file, _ := fileCreate(t, r, vfscommon.CacheModeOff)
+	_, _, file, _, cleanup := fileCreate(t, vfscommon.CacheModeOff)
+	defer cleanup()
 	fd, err := file.Open(os.O_RDONLY)
 	require.NoError(t, err)
@@ -242,15 +237,15 @@ func TestFileOpen(t *testing.T) {
 	assert.NoError(t, err)
 	_, ok = fd.(*WriteFileHandle)
 	assert.True(t, ok)
+	require.NoError(t, fd.Close())
 	_, err = file.Open(3)
 	assert.Equal(t, EPERM, err)
 }
 func testFileRename(t *testing.T, mode vfscommon.CacheMode) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
-	vfs, file, item := fileCreate(t, r, mode)
+	r, vfs, file, item, cleanup := fileCreate(t, mode)
+	defer cleanup()
 	if !operations.CanServerSideMove(r.Fremote) {
 		t.Skip("skip as can't rename files")
@@ -326,6 +321,7 @@ func testFileRename(t *testing.T, mode vfscommon.CacheMode) {
 	// Check file has now been renamed on the remote
 	item.Path = "newLeaf"
+	vfs.WaitForWriters(waitForWritersDelay)
 	fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{newItem}, nil, fs.ModTimeNotSupported)
 }

View File

@@ -12,8 +12,8 @@ import (
 )
 // Open a file for write
-func readHandleCreate(t *testing.T, r *fstest.Run) (*VFS, *ReadFileHandle) {
-	vfs := New(r.Fremote, nil)
+func readHandleCreate(t *testing.T) (r *fstest.Run, vfs *VFS, fh *ReadFileHandle, cleanup func()) {
+	r, vfs, cleanup = newTestVFS(t)
 	file1 := r.WriteObject(context.Background(), "dir/file1", "0123456789abcdef", t1)
 	fstest.CheckItems(t, r.Fremote, file1)
@@ -23,7 +23,7 @@ func readHandleCreate(t *testing.T, r *fstest.Run) (*VFS, *ReadFileHandle) {
 	fh, ok := h.(*ReadFileHandle)
 	require.True(t, ok)
-	return vfs, fh
+	return r, vfs, fh, cleanup
 }
 // read data from the string
@@ -37,9 +37,8 @@ func readString(t *testing.T, fh *ReadFileHandle, n int) string {
 }
 func TestReadFileHandleMethods(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
-	_, fh := readHandleCreate(t, r)
+	_, _, fh, cleanup := readHandleCreate(t)
+	defer cleanup()
 	// String
 	assert.Equal(t, "dir/file1 (r)", fh.String())
@@ -81,9 +80,8 @@ func TestReadFileHandleMethods(t *testing.T) {
 }
 func TestReadFileHandleSeek(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
-	_, fh := readHandleCreate(t, r)
+	_, _, fh, cleanup := readHandleCreate(t)
+	defer cleanup()
 	assert.Equal(t, "0", readString(t, fh, 1))
@@ -125,9 +123,8 @@ func TestReadFileHandleSeek(t *testing.T) {
 }
 func TestReadFileHandleReadAt(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
-	_, fh := readHandleCreate(t, r)
+	_, _, fh, cleanup := readHandleCreate(t)
+	defer cleanup()
 	// read from start
 	buf := make([]byte, 1)
@@ -182,9 +179,8 @@ func TestReadFileHandleReadAt(t *testing.T) {
 }
 func TestReadFileHandleFlush(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
-	_, fh := readHandleCreate(t, r)
+	_, _, fh, cleanup := readHandleCreate(t)
+	defer cleanup()
 	// Check Flush does nothing if read not called
 	err := fh.Flush()
@@ -212,9 +208,8 @@ func TestReadFileHandleFlush(t *testing.T) {
 }
 func TestReadFileHandleRelease(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
-	_, fh := readHandleCreate(t, r)
+	_, _, fh, cleanup := readHandleCreate(t)
+	defer cleanup()
 	// Check Release does nothing if file not read from
 	err := fh.Release()

View File

@@ -36,7 +36,7 @@ func newRWFileHandle(d *Dir, f *File, flags int) (fh *RWFileHandle, err error) {
 	// get an item to represent this from the cache
 	item := d.vfs.cache.Item(f.Path())
-	exists := f.exists() // || item.Exists()
+	exists := f.exists() || item.Exists()
 	// if O_CREATE and O_EXCL are set and if path already exists, then return EEXIST
 	if flags&(os.O_CREATE|os.O_EXCL) == os.O_CREATE|os.O_EXCL && exists {
@@ -51,7 +51,7 @@ func newRWFileHandle(d *Dir, f *File, flags int) (fh *RWFileHandle, err error) {
 	}
 	// truncate immediately if O_TRUNC is set or O_CREATE is set and file doesn't exist
-	if !fh.readOnly() && (fh.flags&os.O_TRUNC != 0 || (fh.flags&os.O_CREATE != 0 && !(exists || item.Exists()))) {
+	if !fh.readOnly() && (fh.flags&os.O_TRUNC != 0 || (fh.flags&os.O_CREATE != 0 && !exists)) {
 		err = fh.Truncate(0)
 		if err != nil {
 			return nil, errors.Wrap(err, "cache open with O_TRUNC: failed to truncate")

View File

@@ -29,17 +29,12 @@ var (
 	_ Handle = (*RWFileHandle)(nil)
 )
-func cleanup(t *testing.T, r *fstest.Run, vfs *VFS) {
-	assert.NoError(t, vfs.CleanUp())
-	vfs.Shutdown()
-	r.Finalise()
-}
 // Create a file and open it with the flags passed in
-func rwHandleCreateFlags(t *testing.T, r *fstest.Run, create bool, filename string, flags int) (*VFS, *RWFileHandle) {
+func rwHandleCreateFlags(t *testing.T, create bool, filename string, flags int) (r *fstest.Run, vfs *VFS, fh *RWFileHandle, cleanup func()) {
 	opt := vfscommon.DefaultOpt
 	opt.CacheMode = vfscommon.CacheModeFull
-	vfs := New(r.Fremote, &opt)
+	opt.WriteBack = writeBackDelay
+	r, vfs, cleanup = newTestVFSOpt(t, &opt)
 	if create {
 		file1 := r.WriteObject(context.Background(), filename, "0123456789abcdef", t1)
@@ -51,17 +46,17 @@ func rwHandleCreateFlags(t *testing.T, r *fstest.Run, create bool, filename stri
 	fh, ok := h.(*RWFileHandle)
 	require.True(t, ok)
-	return vfs, fh
+	return r, vfs, fh, cleanup
 }
 // Open a file for read
-func rwHandleCreateReadOnly(t *testing.T, r *fstest.Run) (*VFS, *RWFileHandle) {
-	return rwHandleCreateFlags(t, r, true, "dir/file1", os.O_RDONLY)
+func rwHandleCreateReadOnly(t *testing.T) (r *fstest.Run, vfs *VFS, fh *RWFileHandle, cleanup func()) {
+	return rwHandleCreateFlags(t, true, "dir/file1", os.O_RDONLY)
 }
 // Open a file for write
-func rwHandleCreateWriteOnly(t *testing.T, r *fstest.Run) (*VFS, *RWFileHandle) {
-	return rwHandleCreateFlags(t, r, false, "file1", os.O_WRONLY|os.O_CREATE)
+func rwHandleCreateWriteOnly(t *testing.T) (r *fstest.Run, vfs *VFS, fh *RWFileHandle, cleanup func()) {
+	return rwHandleCreateFlags(t, false, "file1", os.O_WRONLY|os.O_CREATE)
 }
 // read data from the string
@@ -75,9 +70,8 @@ func rwReadString(t *testing.T, fh *RWFileHandle, n int) string {
 }
 func TestRWFileHandleMethodsRead(t *testing.T) {
-	r := fstest.NewRun(t)
-	vfs, fh := rwHandleCreateReadOnly(t, r)
-	defer cleanup(t, r, vfs)
+	_, _, fh, cleanup := rwHandleCreateReadOnly(t)
+	defer cleanup()
 	// String
 	assert.Equal(t, "dir/file1 (rw)", fh.String())
@@ -123,9 +117,8 @@ func TestRWFileHandleMethodsRead(t *testing.T) {
 }
 func TestRWFileHandleSeek(t *testing.T) {
-	r := fstest.NewRun(t)
-	vfs, fh := rwHandleCreateReadOnly(t, r)
-	defer cleanup(t, r, vfs)
+	_, _, fh, cleanup := rwHandleCreateReadOnly(t)
+	defer cleanup()
 	assert.Equal(t, fh.opened, false)
@@ -174,9 +167,8 @@ func TestRWFileHandleSeek(t *testing.T) {
 }
 func TestRWFileHandleReadAt(t *testing.T) {
-	r := fstest.NewRun(t)
-	vfs, fh := rwHandleCreateReadOnly(t, r)
-	defer cleanup(t, r, vfs)
+	_, _, fh, cleanup := rwHandleCreateReadOnly(t)
+	defer cleanup()
 	// read from start
 	buf := make([]byte, 1)
@@ -225,9 +217,8 @@ func TestRWFileHandleReadAt(t *testing.T) {
 }
 func TestRWFileHandleFlushRead(t *testing.T) {
-	r := fstest.NewRun(t)
-	vfs, fh := rwHandleCreateReadOnly(t, r)
-	defer cleanup(t, r, vfs)
+	_, _, fh, cleanup := rwHandleCreateReadOnly(t)
+	defer cleanup()
 	// Check Flush does nothing if read not called
 	err := fh.Flush()
@@ -255,9 +246,8 @@ func TestRWFileHandleFlushRead(t *testing.T) {
 }
 func TestRWFileHandleReleaseRead(t *testing.T) {
-	r := fstest.NewRun(t)
-	vfs, fh := rwHandleCreateReadOnly(t, r)
-	defer cleanup(t, r, vfs)
+	_, _, fh, cleanup := rwHandleCreateReadOnly(t)
+	defer cleanup()
 	// Read data
 	buf := make([]byte, 256)
@@ -279,9 +269,8 @@ func TestRWFileHandleReleaseRead(t *testing.T) {
 /// ------------------------------------------------------------
 func TestRWFileHandleMethodsWrite(t *testing.T) {
-	r := fstest.NewRun(t)
-	vfs, fh := rwHandleCreateWriteOnly(t, r)
-	defer cleanup(t, r, vfs)
+	r, vfs, fh, cleanup := rwHandleCreateWriteOnly(t)
+	defer cleanup()
 	// String
 	assert.Equal(t, "file1 (rw)", fh.String())
@@ -351,13 +340,13 @@ func TestRWFileHandleMethodsWrite(t *testing.T) {
 	// check the underlying r.Fremote but not the modtime
 	file1 := fstest.NewItem("file1", "hello world", t1)
+	vfs.WaitForWriters(waitForWritersDelay)
 	fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{file1}, []string{}, fs.ModTimeNotSupported)
 }
 func TestRWFileHandleWriteAt(t *testing.T) {
-	r := fstest.NewRun(t)
-	vfs, fh := rwHandleCreateWriteOnly(t, r)
-	defer cleanup(t, r, vfs)
+	r, vfs, fh, cleanup := rwHandleCreateWriteOnly(t)
+	defer cleanup()
 	offset := func() int64 {
 		n, err := fh.Seek(0, io.SeekCurrent)
@@ -399,13 +388,13 @@ func TestRWFileHandleWriteAt(t *testing.T) {
 	// check the underlying r.Fremote but not the modtime
 	file1 := fstest.NewItem("file1", "hello world", t1)
+	vfs.WaitForWriters(waitForWritersDelay)
 	fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{file1}, []string{}, fs.ModTimeNotSupported)
 }
 func TestRWFileHandleWriteNoWrite(t *testing.T) {
-	r := fstest.NewRun(t)
-	vfs, fh := rwHandleCreateWriteOnly(t, r)
-	defer cleanup(t, r, vfs)
+	r, vfs, fh, cleanup := rwHandleCreateWriteOnly(t)
+	defer cleanup()
 	// Close the file without writing to it
 	err := fh.Close()
@@ -433,13 +422,13 @@ func TestRWFileHandleWriteNoWrite(t *testing.T) {
 	// check the underlying r.Fremote but not the modtime
 	file1 := fstest.NewItem("file1", "", t1)
 	file2 := fstest.NewItem("file2", "", t1)
+	vfs.WaitForWriters(waitForWritersDelay)
 	fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{file1, file2}, []string{}, fs.ModTimeNotSupported)
 }
 func TestRWFileHandleFlushWrite(t *testing.T) {
-	r := fstest.NewRun(t)
-	vfs, fh := rwHandleCreateWriteOnly(t, r)
-	defer cleanup(t, r, vfs)
+	_, _, fh, cleanup := rwHandleCreateWriteOnly(t)
+	defer cleanup()
 	// Check that the file has been create and is open
 	assert.True(t, fh.opened)
@@ -467,9 +456,8 @@ func TestRWFileHandleFlushWrite(t *testing.T) {
 }
 func TestRWFileHandleReleaseWrite(t *testing.T) {
-	r := fstest.NewRun(t)
-	vfs, fh := rwHandleCreateWriteOnly(t, r)
-	defer cleanup(t, r, vfs)
+	_, _, fh, cleanup := rwHandleCreateWriteOnly(t)
+	defer cleanup()
 	// Write some data
 	n, err := fh.Write([]byte("hello"))
@@ -498,9 +486,8 @@ func assertSize(t *testing.T, vfs *VFS, fh *RWFileHandle, filepath string, size
 }
 func TestRWFileHandleSizeTruncateExisting(t *testing.T) {
-	r := fstest.NewRun(t)
-	vfs, fh := rwHandleCreateFlags(t, r, true, "dir/file1", os.O_WRONLY|os.O_TRUNC)
-	defer cleanup(t, r, vfs)
+	_, vfs, fh, cleanup := rwHandleCreateFlags(t, true, "dir/file1", os.O_WRONLY|os.O_TRUNC)
+	defer cleanup()
 	// check initial size after opening
 	assertSize(t, vfs, fh, "dir/file1", 0)
@@ -521,9 +508,8 @@ func TestRWFileHandleSizeTruncateExisting(t *testing.T) {
 }
 func TestRWFileHandleSizeCreateExisting(t *testing.T) {
-	r := fstest.NewRun(t)
-	vfs, fh := rwHandleCreateFlags(t, r, true, "dir/file1", os.O_WRONLY|os.O_CREATE)
-	defer cleanup(t, r, vfs)
+	_, vfs, fh, cleanup := rwHandleCreateFlags(t, true, "dir/file1", os.O_WRONLY|os.O_CREATE)
+	defer cleanup()
 	// check initial size after opening
 	assertSize(t, vfs, fh, "dir/file1", 16)
@@ -552,9 +538,8 @@ func TestRWFileHandleSizeCreateExisting(t *testing.T) {
 }
 func TestRWFileHandleSizeCreateNew(t *testing.T) {
-	r := fstest.NewRun(t)
-	vfs, fh := rwHandleCreateFlags(t, r, false, "file1", os.O_WRONLY|os.O_CREATE)
-	defer cleanup(t, r, vfs)
+	_, vfs, fh, cleanup := rwHandleCreateFlags(t, false, "file1", os.O_WRONLY|os.O_CREATE)
+	defer cleanup()
 	// check initial size after opening
 	assertSize(t, vfs, fh, "file1", 0)
@@ -653,11 +638,11 @@ func testRWFileHandleOpenTest(t *testing.T, vfs *VFS, test *openTest) {
 }
 func TestRWFileHandleOpenTests(t *testing.T) {
-	r := fstest.NewRun(t)
 	opt := vfscommon.DefaultOpt
 	opt.CacheMode = vfscommon.CacheModeFull
-	vfs := New(r.Fremote, &opt)
-	defer cleanup(t, r, vfs)
+	opt.WriteBack = writeBackDelay
+	_, vfs, cleanup := newTestVFSOpt(t, &opt)
+	defer cleanup()
 	for _, test := range openTests {
 		t.Run(test.what, func(t *testing.T) {
@@ -668,12 +653,11 @@ func TestRWFileHandleOpenTests(t *testing.T) {
 // tests mod time on open files
 func TestRWFileModTimeWithOpenWriters(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
+	r, vfs, fh, cleanup := rwHandleCreateWriteOnly(t)
+	defer cleanup()
 	if !canSetModTime(t, r) {
-		return
+		t.Skip("can't set mod time")
 	}
-	vfs, fh := rwHandleCreateWriteOnly(t, r)
 	mtime := time.Date(2012, time.November, 18, 17, 32, 31, 0, time.UTC)
@@ -700,21 +684,21 @@ func TestRWFileModTimeWithOpenWriters(t *testing.T) {
 	}
 	file1 := fstest.NewItem("file1", "hi", mtime)
+	vfs.WaitForWriters(waitForWritersDelay)
 	fstest.CheckItems(t, r.Fremote, file1)
 }
 func TestRWCacheRename(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
+	opt := vfscommon.DefaultOpt
+	opt.CacheMode = vfscommon.CacheModeFull
+	opt.WriteBack = writeBackDelay
+	r, vfs, cleanup := newTestVFSOpt(t, &opt)
+	defer cleanup()
 	if !operations.CanServerSideMove(r.Fremote) {
 		t.Skip("skip as can't rename files")
 	}
-	opt := vfscommon.DefaultOpt
-	opt.CacheMode = vfscommon.CacheModeFull
-	vfs := New(r.Fremote, &opt)
 	h, err := vfs.OpenFile("rename_me", os.O_WRONLY|os.O_CREATE, 0777)
 	require.NoError(t, err)
 	_, err = h.WriteString("hello")

View File

@@ -265,7 +265,7 @@ func (vfs *VFS) FlushDirCache() {
 // time.Duration has elapsed
 func (vfs *VFS) WaitForWriters(timeout time.Duration) {
 	defer log.Trace(nil, "timeout=%v", timeout)("")
-	const tickTime = 1 * time.Second
+	tickTime := 10 * time.Millisecond
 	deadline := time.NewTimer(timeout)
 	defer deadline.Stop()
 	tick := time.NewTimer(tickTime)
@@ -273,18 +273,26 @@ func (vfs *VFS) WaitForWriters(timeout time.Duration) {
 	tick.Stop()
 	for {
 		writers := vfs.root.countActiveWriters()
-		if writers == 0 {
+		cacheInUse := 0
+		if vfs.cache != nil {
+			cacheInUse = vfs.cache.TotalInUse()
+		}
+		if writers == 0 && cacheInUse == 0 {
 			return
 		}
-		fs.Debugf(nil, "Still %d writers active, waiting %v", writers, tickTime)
+		fs.Debugf(nil, "Still %d writers active and %d cache items in use, waiting %v", writers, cacheInUse, tickTime)
 		tick.Reset(tickTime)
 		select {
 		case <-tick.C:
 			break
 		case <-deadline.C:
-			fs.Errorf(nil, "Exiting even though %d writers are active after %v", writers, timeout)
+			fs.Errorf(nil, "Exiting even though %d writers active and %d cache items in use after %v\n%s", writers, cacheInUse, timeout, vfs.cache.Dump())
			return
 		}
+		tickTime *= 2
+		if tickTime > time.Second {
+			tickTime = time.Second
+		}
 	}
 }
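For reference, the waiting pattern above in isolation: poll quickly at first, back off by doubling the interval, cap it at one second, and give up at the deadline. This is a standalone illustration of the idea, not rclone code; every name in it is invented.

package main

import (
	"fmt"
	"time"
)

// waitUntil polls done() with a capped exponential backoff until it returns
// true or the timeout expires. It reports whether done() succeeded in time.
func waitUntil(done func() bool, timeout time.Duration) bool {
	tickTime := 10 * time.Millisecond
	deadline := time.Now().Add(timeout)
	for {
		if done() {
			return true
		}
		if time.Now().After(deadline) {
			return false
		}
		time.Sleep(tickTime)
		tickTime *= 2 // back off: 10ms, 20ms, 40ms, ... capped at 1s
		if tickTime > time.Second {
			tickTime = time.Second
		}
	}
}

func main() {
	start := time.Now()
	ok := waitUntil(func() bool { return time.Since(start) > 50*time.Millisecond }, time.Second)
	fmt.Println("finished:", ok)
}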

View File

@@ -36,10 +36,12 @@ func TestCaseSensitivity(t *testing.T) {
 	optCS := vfscommon.DefaultOpt
 	optCS.CaseInsensitive = false
 	vfsCS := New(r.Fremote, &optCS)
+	defer cleanupVFS(t, vfsCS)
 	optCI := vfscommon.DefaultOpt
 	optCI.CaseInsensitive = true
 	vfsCI := New(r.Fremote, &optCI)
+	defer cleanupVFS(t, vfsCI)
 	// Run basic checks that must pass on VFS of any type.
 	assertFileDataVFS(t, vfsCI, "FiLeA", "data1")

View File

@@ -8,6 +8,7 @@ import (
 	"io"
 	"os"
 	"testing"
+	"time"
 	"github.com/pkg/errors"
 	_ "github.com/rclone/rclone/backend/all" // import all the backends
@@ -25,11 +26,41 @@ var (
 	t3 = fstest.Time("2011-12-30T12:59:59.000000000Z")
 )
+// Constants uses in the tests
+const (
+	writeBackDelay      = 100 * time.Millisecond // A short writeback delay for testing
+	waitForWritersDelay = 10 * time.Second       // time to wait for exiting writiers
+)
+
 // TestMain drives the tests
 func TestMain(m *testing.M) {
 	fstest.TestMain(m)
 }
+
+// Clean up a test VFS
+func cleanupVFS(t *testing.T, vfs *VFS) {
+	vfs.WaitForWriters(waitForWritersDelay)
+	err := vfs.CleanUp()
+	require.NoError(t, err)
+	vfs.Shutdown()
+}
+
+// Create a new VFS
+func newTestVFSOpt(t *testing.T, opt *vfscommon.Options) (r *fstest.Run, vfs *VFS, cleanup func()) {
+	r = fstest.NewRun(t)
+	vfs = New(r.Fremote, opt)
+	cleanup = func() {
+		cleanupVFS(t, vfs)
+		r.Finalise()
+	}
+	return r, vfs, cleanup
+}
+
+// Create a new VFS with default options
+func newTestVFS(t *testing.T) (r *fstest.Run, vfs *VFS, cleanup func()) {
+	return newTestVFSOpt(t, nil)
+}
+
 // Check baseHandle performs as advertised
 func TestVFSbaseHandle(t *testing.T) {
 	fh := baseHandle{}
@@ -97,31 +128,33 @@ func TestVFSbaseHandle(t *testing.T) {
 // TestNew sees if the New command works properly
 func TestVFSNew(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
+	r, vfs, cleanup := newTestVFS(t)
+	defer cleanup()
 	// Check making a VFS with nil options
-	vfs := New(r.Fremote, nil)
 	var defaultOpt = vfscommon.DefaultOpt
 	defaultOpt.DirPerms |= os.ModeDir
 	assert.Equal(t, vfs.Opt, defaultOpt)
 	assert.Equal(t, vfs.f, r.Fremote)
+}
-	// Check the initialisation
+// TestNew sees if the New command works properly
+func TestVFSNewWithOpts(t *testing.T) {
 	var opt = vfscommon.DefaultOpt
 	opt.DirPerms = 0777
 	opt.FilePerms = 0666
 	opt.Umask = 0002
-	vfs = New(r.Fremote, &opt)
+	_, vfs, cleanup := newTestVFSOpt(t, &opt)
+	defer cleanup()
 	assert.Equal(t, os.FileMode(0775)|os.ModeDir, vfs.Opt.DirPerms)
 	assert.Equal(t, os.FileMode(0664), vfs.Opt.FilePerms)
 }
 // TestRoot checks root directory is present and correct
 func TestVFSRoot(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
-	vfs := New(r.Fremote, nil)
+	_, vfs, cleanup := newTestVFS(t)
+	defer cleanup()
 	root, err := vfs.Root()
 	require.NoError(t, err)
@@ -131,9 +164,8 @@ func TestVFSRoot(t *testing.T) {
 }
 func TestVFSStat(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
-	vfs := New(r.Fremote, nil)
+	r, vfs, cleanup := newTestVFS(t)
+	defer cleanup()
 	file1 := r.WriteObject(context.Background(), "file1", "file1 contents", t1)
 	file2 := r.WriteObject(context.Background(), "dir/file2", "file2 contents", t2)
@@ -168,9 +200,8 @@ func TestVFSStat(t *testing.T) {
 }
 func TestVFSStatParent(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
-	vfs := New(r.Fremote, nil)
+	r, vfs, cleanup := newTestVFS(t)
+	defer cleanup()
 	file1 := r.WriteObject(context.Background(), "file1", "file1 contents", t1)
 	file2 := r.WriteObject(context.Background(), "dir/file2", "file2 contents", t2)
@@ -202,9 +233,8 @@ func TestVFSStatParent(t *testing.T) {
 }
 func TestVFSOpenFile(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
-	vfs := New(r.Fremote, nil)
+	r, vfs, cleanup := newTestVFS(t)
+	defer cleanup()
 	file1 := r.WriteObject(context.Background(), "file1", "file1 contents", t1)
 	file2 := r.WriteObject(context.Background(), "dir/file2", "file2 contents", t2)
@@ -238,13 +268,13 @@ func TestVFSOpenFile(t *testing.T) {
 }
 func TestVFSRename(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
+	r, vfs, cleanup := newTestVFS(t)
+	defer cleanup()
 	features := r.Fremote.Features()
 	if features.Move == nil && features.Copy == nil {
-		return // skip as can't rename files
+		t.Skip("skip as can't rename files")
 	}
-	vfs := New(r.Fremote, nil)
 	file1 := r.WriteObject(context.Background(), "dir/file2", "file2 contents", t2)
 	fstest.CheckItems(t, r.Fremote, file1)
@@ -267,9 +297,8 @@ func TestVFSRename(t *testing.T) {
 }
 func TestVFSStatfs(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
-	vfs := New(r.Fremote, nil)
+	r, vfs, cleanup := newTestVFS(t)
+	defer cleanup()
 	// pre-conditions
 	assert.Nil(t, vfs.usage)

View File

@@ -18,7 +18,6 @@ import (
 	fscache "github.com/rclone/rclone/fs/cache"
 	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/fs/log"
 	"github.com/rclone/rclone/fs/operations"
 	"github.com/rclone/rclone/lib/file"
 	"github.com/rclone/rclone/vfs/vfscommon"
@@ -43,6 +42,7 @@ type Cache struct {
 	metaRoot   string           // root of the cache metadata directory
 	hashType   hash.Type        // hash to use locally and remotely
 	hashOption *fs.HashesOption // corresponding OpenOption
+	writeback  *writeBack       // holds Items for writeback
 	mu   sync.Mutex       // protects the following variables
 	item map[string]*Item // files/directories in the cache
@@ -87,6 +87,7 @@ func New(ctx context.Context, fremote fs.Fs, opt *vfscommon.Options) (*Cache, er
 		item:       make(map[string]*Item),
 		hashType:   hashType,
 		hashOption: hashOption,
+		writeback:  newWriteBack(ctx, opt),
 	}
 	// Make sure cache directories exist
@@ -200,6 +201,20 @@ func (c *Cache) InUse(name string) bool {
 	return item.inUse()
 }
+// DirtyItem the Item if it exists in the cache and is Dirty
+//
+// name should be a remote path not an osPath
+func (c *Cache) DirtyItem(name string) (item *Item) {
+	name = clean(name)
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	item = c.item[name]
+	if item != nil && !item.IsDirty() {
+		item = nil
+	}
+	return item
+}
+
 // get gets a file name from the cache or creates a new one
 //
 // It returns the item and found as to whether this item was found in
@@ -224,20 +239,14 @@ func (c *Cache) Item(name string) (item *Item) {
 	return item
 }
-// Exists checks to see if the file exists in the cache or not
+// Exists checks to see if the file exists in the cache or not.
 //
-// FIXME check the metadata exists here too?
+// This is done by bringing the item into the cache which will
+// validate the backing file and metadata and then asking if the Item
+// exists or not.
 func (c *Cache) Exists(name string) bool {
-	osPath := c.toOSPath(name)
-	fi, err := os.Stat(osPath)
-	if err != nil {
-		return false
-	}
-	// checks for non-regular files (e.g. directories, symlinks, devices, etc.)
-	if !fi.Mode().IsRegular() {
-		return false
-	}
-	return true
+	item, _ := c.get(name)
+	return item.Exists()
 }
 // rename with os.Rename and more checking
@@ -300,8 +309,13 @@ func (c *Cache) Rename(name string, newName string, newObj fs.Object) (err error
 // Remove should be called if name is deleted
 func (c *Cache) Remove(name string) {
-	item, _ := c.get(name)
+	name = clean(name)
+	c.mu.Lock()
+	item, _ := c._get(name)
+	delete(c.item, name)
+	c.mu.Unlock()
 	item.remove("file deleted")
 }
 // SetModTime should be called to set the modification time of the cache file
@@ -506,9 +520,30 @@ func (c *Cache) cleaner(ctx context.Context) {
 	}
 }
-// Check the local file is up to date in the cache
-func (c *Cache) Check(ctx context.Context, o fs.Object, remote string) (err error) {
-	defer log.Trace(o, "remote=%q", remote)("err=%v", &err)
-	item, _ := c.get(remote)
-	return item.checkObject(o)
+// TotalInUse returns the number of items in the cache which are InUse
+func (c *Cache) TotalInUse() (n int) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	for _, item := range c.item {
+		if item.inUse() {
+			n++
+		}
+	}
+	return n
+}
+
+// Dump the cache into a string for debugging purposes
+func (c *Cache) Dump() string {
+	if c == nil {
+		return "Cache: <nil>\n"
+	}
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	var out strings.Builder
+	out.WriteString("Cache{\n")
+	for name, item := range c.item {
+		fmt.Fprintf(&out, "\t%q: %+v,\n", name, item)
	}
+	out.WriteString("}\n")
+	return out.String()
 }
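A hedged sketch of how the new cache hooks behave, written in the style of the cache tests added later in this commit (it condenses TestCacheDirtyItem and TestCacheExistsAndRemove): an item that only exists as local data in the cache is still reported by Exists, and DirtyItem returns it until it is no longer dirty. The test name below is hypothetical; every call in it appears in the real tests.

func TestCacheDirtyVisibility(t *testing.T) { // hypothetical example, not in the commit
	_, c, cleanup := newTestCache(t)
	defer cleanup()

	potato := c.Item("potato")
	require.NoError(t, potato.Open(nil))
	require.NoError(t, potato.Truncate(5)) // local data only so far

	assert.True(t, c.Exists("potato"))             // Exists is about the cache copy, not the remote
	assert.Equal(t, potato, c.DirtyItem("potato")) // dirty until it has been written back

	require.NoError(t, potato.Close(nil)) // after Close the item is no longer dirty
	assert.Nil(t, c.DirtyItem("potato"))
	assert.True(t, c.Exists("potato"))
}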

View File

@ -33,22 +33,59 @@ func itemAsString(c *Cache) []string {
return out return out
} }
func TestCacheNew(t *testing.T) { // open an item and write to it
r := fstest.NewRun(t) func itemWrite(t *testing.T, item *Item, contents string) {
defer r.Finalise() require.NoError(t, item.Open(nil))
_, err := item.WriteAt([]byte(contents), 0)
require.NoError(t, err)
}
func assertPathNotExist(t *testing.T, path string) {
_, err := os.Stat(path)
assert.True(t, os.IsNotExist(err))
}
func assertPathExist(t *testing.T, path string) os.FileInfo {
fi, err := os.Stat(path)
assert.NoError(t, err)
return fi
}
func newTestCacheOpt(t *testing.T, opt vfscommon.Options) (r *fstest.Run, c *Cache, cleanup func()) {
r = fstest.NewRun(t)
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// FIXME need to be writing to the actual file here
// need a item.WriteAt/item.ReadAt method I think
// Disable the cache cleaner as it interferes with these tests
opt := vfscommon.DefaultOpt
opt.CachePollInterval = 0
c, err := New(ctx, r.Fremote, &opt) c, err := New(ctx, r.Fremote, &opt)
require.NoError(t, err) require.NoError(t, err)
cleanup = func() {
err := c.CleanUp()
require.NoError(t, err)
assertPathNotExist(t, c.root)
cancel()
r.Finalise()
}
return r, c, cleanup
}
func newTestCache(t *testing.T) (r *fstest.Run, c *Cache, cleanup func()) {
opt := vfscommon.DefaultOpt
// Disable the cache cleaner as it interferes with these tests
opt.CachePollInterval = 0
// Enable synchronous write
opt.WriteBack = 0
return newTestCacheOpt(t, opt)
}
func TestCacheNew(t *testing.T) {
r, c, cleanup := newTestCache(t)
defer cleanup()
assert.Contains(t, c.root, "vfs") assert.Contains(t, c.root, "vfs")
assert.Contains(t, c.fcache.Root(), filepath.Base(r.Fremote.Root())) assert.Contains(t, c.fcache.Root(), filepath.Base(r.Fremote.Root()))
assert.Equal(t, []string(nil), itemAsString(c)) assert.Equal(t, []string(nil), itemAsString(c))
@ -59,8 +96,7 @@ func TestCacheNew(t *testing.T) {
assert.Equal(t, "potato", filepath.Base(p)) assert.Equal(t, "potato", filepath.Base(p))
assert.Equal(t, []string(nil), itemAsString(c)) assert.Equal(t, []string(nil), itemAsString(c))
fi, err := os.Stat(filepath.Dir(p)) fi := assertPathExist(t, filepath.Dir(p))
require.NoError(t, err)
assert.True(t, fi.IsDir()) assert.True(t, fi.IsDir())
// get // get
@ -85,13 +121,6 @@ func TestCacheNew(t *testing.T) {
require.NoError(t, potato.Truncate(5)) require.NoError(t, potato.Truncate(5))
atime := time.Now() atime := time.Now()
potato.info.ATime = atime potato.info.ATime = atime
// err = ioutil.WriteFile(p, []byte("hello"), 0600)
// require.NoError(t, err)
// read its atime
// updateAtimes
//potato.ATime = time.Now().Add(-24 * time.Hour)
assert.Equal(t, []string{ assert.Equal(t, []string{
`name="potato" opens=1 size=5`, `name="potato" opens=1 size=5`,
@ -100,8 +129,7 @@ func TestCacheNew(t *testing.T) {
// try purging with file open // try purging with file open
c.purgeOld(10 * time.Second) c.purgeOld(10 * time.Second)
// _, err = os.Stat(p) assertPathExist(t, p)
// assert.NoError(t, err)
// close // close
assert.Equal(t, []string{ assert.Equal(t, []string{
@ -121,35 +149,19 @@ func TestCacheNew(t *testing.T) {
// try purging with file closed // try purging with file closed
c.purgeOld(10 * time.Second) c.purgeOld(10 * time.Second)
// ...nothing should happend assertPathExist(t, p)
// _, err = os.Stat(p)
// assert.NoError(t, err)
//.. purge again with -ve age //.. purge again with -ve age
c.purgeOld(-10 * time.Second) c.purgeOld(-10 * time.Second)
_, err = os.Stat(p) assertPathNotExist(t, p)
assert.True(t, os.IsNotExist(err))
// clean - have tested the internals already // clean - have tested the internals already
c.clean() c.clean()
// cleanup
err = c.CleanUp()
require.NoError(t, err)
_, err = os.Stat(c.root)
assert.True(t, os.IsNotExist(err))
} }
func TestCacheOpens(t *testing.T) { func TestCacheOpens(t *testing.T) {
r := fstest.NewRun(t) _, c, cleanup := newTestCache(t)
defer r.Finalise() defer cleanup()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
c, err := New(ctx, r.Fremote, &vfscommon.DefaultOpt)
require.NoError(t, err)
defer func() { require.NoError(t, c.CleanUp()) }()
assert.Equal(t, []string(nil), itemAsString(c)) assert.Equal(t, []string(nil), itemAsString(c))
potato := c.Item("potato") potato := c.Item("potato")
@ -197,18 +209,8 @@ func TestCacheOpens(t *testing.T) {
// test the open, mkdir, purge, close, purge sequence // test the open, mkdir, purge, close, purge sequence
func TestCacheOpenMkdir(t *testing.T) { func TestCacheOpenMkdir(t *testing.T) {
r := fstest.NewRun(t) _, c, cleanup := newTestCache(t)
defer r.Finalise() defer cleanup()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Disable the cache cleaner as it interferes with these tests
opt := vfscommon.DefaultOpt
opt.CachePollInterval = 0
c, err := New(ctx, r.Fremote, &opt)
require.NoError(t, err)
defer func() { require.NoError(t, c.CleanUp()) }()
// open // open
potato := c.Item("sub/potato") potato := c.Item("sub/potato")
@ -227,16 +229,14 @@ func TestCacheOpenMkdir(t *testing.T) {
}, itemAsString(c)) }, itemAsString(c))
// test directory exists // test directory exists
fi, err := os.Stat(filepath.Dir(p)) fi := assertPathExist(t, filepath.Dir(p))
require.NoError(t, err)
assert.True(t, fi.IsDir()) assert.True(t, fi.IsDir())
// clean the cache // clean the cache
c.purgeOld(-10 * time.Second) c.purgeOld(-10 * time.Second)
// test directory still exists // test directory still exists
fi, err = os.Stat(filepath.Dir(p)) fi = assertPathExist(t, filepath.Dir(p))
require.NoError(t, err)
assert.True(t, fi.IsDir()) assert.True(t, fi.IsDir())
// close // close
@ -252,39 +252,23 @@ func TestCacheOpenMkdir(t *testing.T) {
assert.Equal(t, []string(nil), itemAsString(c)) assert.Equal(t, []string(nil), itemAsString(c))
// FIXME test directory does not exist // test directory does not exist
// _, err = os.Stat(filepath.Dir(p)) assertPathNotExist(t, filepath.Dir(p))
// require.True(t, os.IsNotExist(err))
} }
func TestCachePurgeOld(t *testing.T) { func TestCachePurgeOld(t *testing.T) {
r := fstest.NewRun(t) _, c, cleanup := newTestCache(t)
defer r.Finalise() defer cleanup()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
c, err := New(ctx, r.Fremote, &vfscommon.DefaultOpt)
require.NoError(t, err)
defer func() { require.NoError(t, c.CleanUp()) }()
// Test funcs // Test funcs
var removed []string var removed []string
//removedDir := true
removeFile := func(item *Item) { removeFile := func(item *Item) {
removed = append(removed, item.name) removed = append(removed, item.name)
item._remove("TestCachePurgeOld") item._remove("TestCachePurgeOld")
} }
// removeDir := func(name string) bool {
// if removedDir {
// removed = append(removed, filepath.ToSlash(name)+"/")
// }
// return removedDir
// }
removed = nil removed = nil
c._purgeOld(-10*time.Second, removeFile) c._purgeOld(-10*time.Second, removeFile)
// FIXME c._purgeEmptyDirs(removeDir)
assert.Equal(t, []string(nil), removed) assert.Equal(t, []string(nil), removed)
potato2 := c.Item("sub/dir2/potato2") potato2 := c.Item("sub/dir2/potato2")
@ -300,9 +284,7 @@ func TestCachePurgeOld(t *testing.T) {
}, itemAsString(c)) }, itemAsString(c))
removed = nil removed = nil
// removedDir = true
c._purgeOld(-10*time.Second, removeFile) c._purgeOld(-10*time.Second, removeFile)
// FIXME c._purgeEmptyDirs(removeDir)
assert.Equal(t, []string{ assert.Equal(t, []string{
"sub/dir2/potato2", "sub/dir2/potato2",
}, removed) }, removed)
@ -310,9 +292,7 @@ func TestCachePurgeOld(t *testing.T) {
require.NoError(t, potato.Close(nil)) require.NoError(t, potato.Close(nil))
removed = nil removed = nil
// removedDir = true
c._purgeOld(-10*time.Second, removeFile) c._purgeOld(-10*time.Second, removeFile)
// FIXME c._purgeEmptyDirs(removeDir)
assert.Equal(t, []string(nil), removed) assert.Equal(t, []string(nil), removed)
require.NoError(t, potato.Close(nil)) require.NoError(t, potato.Close(nil))
@ -322,9 +302,7 @@ func TestCachePurgeOld(t *testing.T) {
}, itemAsString(c)) }, itemAsString(c))
removed = nil removed = nil
// removedDir = false
c._purgeOld(10*time.Second, removeFile) c._purgeOld(10*time.Second, removeFile)
// FIXME c._purgeEmptyDirs(removeDir)
assert.Equal(t, []string(nil), removed) assert.Equal(t, []string(nil), removed)
assert.Equal(t, []string{ assert.Equal(t, []string{
@ -332,9 +310,7 @@ func TestCachePurgeOld(t *testing.T) {
}, itemAsString(c)) }, itemAsString(c))
removed = nil removed = nil
// removedDir = true
c._purgeOld(-10*time.Second, removeFile) c._purgeOld(-10*time.Second, removeFile)
// FIXME c._purgeEmptyDirs(removeDir)
assert.Equal(t, []string{ assert.Equal(t, []string{
"sub/dir/potato", "sub/dir/potato",
}, removed) }, removed)
@ -343,17 +319,8 @@ func TestCachePurgeOld(t *testing.T) {
} }
func TestCachePurgeOverQuota(t *testing.T) { func TestCachePurgeOverQuota(t *testing.T) {
r := fstest.NewRun(t) _, c, cleanup := newTestCache(t)
defer r.Finalise() defer cleanup()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Disable the cache cleaner as it interferes with these tests
opt := vfscommon.DefaultOpt
opt.CachePollInterval = 0
c, err := New(ctx, r.Fremote, &opt)
require.NoError(t, err)
// Test funcs // Test funcs
var removed []string var removed []string
@ -376,12 +343,10 @@ func TestCachePurgeOverQuota(t *testing.T) {
// Make some test files // Make some test files
potato := c.Item("sub/dir/potato") potato := c.Item("sub/dir/potato")
require.NoError(t, potato.Open(nil)) itemWrite(t, potato, "hello")
require.NoError(t, potato.Truncate(5))
potato2 := c.Item("sub/dir2/potato2") potato2 := c.Item("sub/dir2/potato2")
require.NoError(t, potato2.Open(nil)) itemWrite(t, potato2, "hello2")
require.NoError(t, potato2.Truncate(6))
assert.Equal(t, []string{ assert.Equal(t, []string{
`name="sub/dir/potato" opens=1 size=5`, `name="sub/dir/potato" opens=1 size=5`,
@ -407,7 +372,6 @@ func TestCachePurgeOverQuota(t *testing.T) {
// make potato2 definitely after potato // make potato2 definitely after potato
t1 := time.Now().Add(10 * time.Second) t1 := time.Now().Add(10 * time.Second)
require.NoError(t, potato2.Truncate(6))
potato2.info.ATime = t1 potato2.info.ATime = t1
// Check only potato removed to get below quota // Check only potato removed to get below quota
@ -470,3 +434,210 @@ func TestCachePurgeOverQuota(t *testing.T) {
assert.Equal(t, int64(0), c.used) assert.Equal(t, int64(0), c.used)
assert.Equal(t, []string(nil), itemAsString(c)) assert.Equal(t, []string(nil), itemAsString(c))
} }
func TestCacheInUse(t *testing.T) {
_, c, cleanup := newTestCache(t)
defer cleanup()
assert.False(t, c.InUse("potato"))
potato := c.Item("potato")
assert.False(t, c.InUse("potato"))
require.NoError(t, potato.Open(nil))
assert.True(t, c.InUse("potato"))
require.NoError(t, potato.Close(nil))
assert.False(t, c.InUse("potato"))
}
func TestCacheDirtyItem(t *testing.T) {
_, c, cleanup := newTestCache(t)
defer cleanup()
assert.Nil(t, c.DirtyItem("potato"))
potato := c.Item("potato")
assert.Nil(t, c.DirtyItem("potato"))
require.NoError(t, potato.Open(nil))
require.NoError(t, potato.Truncate(5))
assert.Equal(t, potato, c.DirtyItem("potato"))
require.NoError(t, potato.Close(nil))
assert.Nil(t, c.DirtyItem("potato"))
}
func TestCacheExistsAndRemove(t *testing.T) {
_, c, cleanup := newTestCache(t)
defer cleanup()
assert.False(t, c.Exists("potato"))
potato := c.Item("potato")
assert.False(t, c.Exists("potato"))
require.NoError(t, potato.Open(nil))
assert.True(t, c.Exists("potato"))
require.NoError(t, potato.Close(nil))
assert.True(t, c.Exists("potato"))
c.Remove("potato")
assert.False(t, c.Exists("potato"))
}
func TestCacheRename(t *testing.T) {
_, c, cleanup := newTestCache(t)
defer cleanup()
// setup
assert.False(t, c.Exists("potato"))
potato := c.Item("potato")
require.NoError(t, potato.Open(nil))
require.NoError(t, potato.Close(nil))
assert.True(t, c.Exists("potato"))
osPath := c.toOSPath("potato")
osPathMeta := c.toOSPathMeta("potato")
assertPathExist(t, osPath)
assertPathExist(t, osPathMeta)
// rename potato -> newPotato
require.NoError(t, c.Rename("potato", "newPotato", nil))
assertPathNotExist(t, osPath)
assertPathNotExist(t, osPathMeta)
assert.False(t, c.Exists("potato"))
osPath = c.toOSPath("newPotato")
osPathMeta = c.toOSPathMeta("newPotato")
assertPathExist(t, osPath)
assertPathExist(t, osPathMeta)
assert.True(t, c.Exists("newPotato"))
// rename newPotato -> sub/newPotato
require.NoError(t, c.Rename("newPotato", "sub/newPotato", nil))
assertPathNotExist(t, osPath)
assertPathNotExist(t, osPathMeta)
assert.False(t, c.Exists("potato"))
osPath = c.toOSPath("sub/newPotato")
osPathMeta = c.toOSPathMeta("sub/newPotato")
assertPathExist(t, osPath)
assertPathExist(t, osPathMeta)
assert.True(t, c.Exists("sub/newPotato"))
// remove
c.Remove("sub/newPotato")
assertPathNotExist(t, osPath)
assertPathNotExist(t, osPathMeta)
assert.False(t, c.Exists("sub/newPotato"))
// non existent file - is ignored
assert.NoError(t, c.Rename("nonexist", "nonexist2", nil))
}
func TestCacheCleaner(t *testing.T) {
opt := vfscommon.DefaultOpt
opt.CachePollInterval = 10 * time.Millisecond
opt.CacheMaxAge = 20 * time.Millisecond
_, c, cleanup := newTestCacheOpt(t, opt)
defer cleanup()
time.Sleep(2 * opt.CachePollInterval)
potato := c.Item("potato")
potato2, found := c.get("potato")
assert.Equal(t, potato, potato2)
assert.True(t, found)
time.Sleep(10 * opt.CachePollInterval)
potato2, found = c.get("potato")
assert.NotEqual(t, potato, potato2)
assert.False(t, found)
}
func TestCacheSetModTime(t *testing.T) {
_, c, cleanup := newTestCache(t)
defer cleanup()
t1 := time.Date(2010, 1, 2, 3, 4, 5, 9, time.UTC)
potato := c.Item("potato")
require.NoError(t, potato.Open(nil))
require.NoError(t, potato.Truncate(5))
require.NoError(t, potato.Close(nil))
c.SetModTime("potato", t1)
osPath := potato.c.toOSPath("potato")
fi, err := os.Stat(osPath)
require.NoError(t, err)
fstest.AssertTimeEqualWithPrecision(t, "potato", t1, fi.ModTime(), time.Second)
}
func TestCacheTotaInUse(t *testing.T) {
_, c, cleanup := newTestCache(t)
defer cleanup()
assert.Equal(t, int(0), c.TotalInUse())
potato := c.Item("potato")
assert.Equal(t, int(0), c.TotalInUse())
require.NoError(t, potato.Open(nil))
assert.Equal(t, int(1), c.TotalInUse())
require.NoError(t, potato.Truncate(5))
assert.Equal(t, int(1), c.TotalInUse())
potato2 := c.Item("potato2")
assert.Equal(t, int(1), c.TotalInUse())
require.NoError(t, potato2.Open(nil))
assert.Equal(t, int(2), c.TotalInUse())
require.NoError(t, potato2.Close(nil))
assert.Equal(t, int(1), c.TotalInUse())
require.NoError(t, potato.Close(nil))
assert.Equal(t, int(0), c.TotalInUse())
}
func TestCacheDump(t *testing.T) {
_, c, cleanup := newTestCache(t)
defer cleanup()
out := (*Cache)(nil).Dump()
assert.Equal(t, "Cache: <nil>\n", out)
out = c.Dump()
assert.Equal(t, "Cache{\n}\n", out)
c.Item("potato")
out = c.Dump()
want := "Cache{\n\t\"potato\": "
assert.Equal(t, want, out[:len(want)])
c.Remove("potato")
out = c.Dump()
assert.Equal(t, "Cache{\n}\n", out)
}

View File

@ -60,11 +60,12 @@ func newDownloader(item *Item, fcache fs.Fs, remote string, src fs.Object) (dl *
if err == nil { if err == nil {
// do nothing // do nothing
} else if os.IsNotExist(err) { } else if os.IsNotExist(err) {
fs.Debugf(src, "creating empty file") return nil, errors.New("vfs cache: internal error: newDownloader: called before Item.Open")
err = item._truncateToCurrentSize() // fs.Debugf(src, "creating empty file")
if err != nil { // err = item._truncateToCurrentSize()
return nil, errors.Wrap(err, "newDownloader: failed to create empty file") // if err != nil {
} // return nil, errors.Wrap(err, "newDownloader: failed to create empty file")
// }
} else { } else {
return nil, errors.Wrap(err, "newDownloader: failed to stat cache file") return nil, errors.Wrap(err, "newDownloader: failed to stat cache file")
} }
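The effect of this change is that newDownloader no longer creates the cache file itself - reaching it before Item.Open has created the file is now an internal error. A sketch of the ordering this enforces (the call sites are assumptions, not shown in this diff):

item := c.Item("dir/file")
require.NoError(t, item.Open(obj)) // creates the cache file on disk
// only after Open may reads on the item start a downloader for missing ranges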

View File

@ -145,6 +145,13 @@ func (item *Item) getATime() time.Time {
return item.info.ATime return item.info.ATime
} }
// getName returns the name of the item
func (item *Item) getName() string {
item.mu.Lock()
defer item.mu.Unlock()
return item.name
}
// getDiskSize returns the size on disk (approximately) of the item // getDiskSize returns the size on disk (approximately) of the item
// //
// We return the sizes of the chunks we have fetched, however there is // We return the sizes of the chunks we have fetched, however there is
@ -374,6 +381,13 @@ func (item *Item) Dirty() {
item.mu.Unlock() item.mu.Unlock()
} }
// IsDirty returns true if the item is dirty
func (item *Item) IsDirty() bool {
item.mu.Lock()
defer item.mu.Unlock()
return item.metaDirty || item.info.Dirty
}
// Open the local file from the object passed in (which may be nil) // Open the local file from the object passed in (which may be nil)
// which implies we are about to create the file // which implies we are about to create the file
func (item *Item) Open(o fs.Object) (err error) { func (item *Item) Open(o fs.Object) (err error) {
@ -445,10 +459,9 @@ func (item *Item) Open(o fs.Object) (err error) {
// Store stores the local cache file to the remote object, returning // Store stores the local cache file to the remote object, returning
// the new remote object. objOld is the old object if known. // the new remote object. objOld is the old object if known.
// //
// call with item lock held // Call with lock held
func (item *Item) _store() (err error) { func (item *Item) _store(ctx context.Context, storeFn StoreFn) (err error) {
defer log.Trace(item.name, "item=%p", item)("err=%v", &err) defer log.Trace(item.name, "item=%p", item)("err=%v", &err)
ctx := context.Background()
// Ensure any segments not transferred are brought in // Ensure any segments not transferred are brought in
err = item._ensure(0, item.info.Size) err = item._ensure(0, item.info.Size)
@ -462,23 +475,51 @@ func (item *Item) _store() (err error) {
return errors.Wrap(err, "vfs cache: failed to find cache file") return errors.Wrap(err, "vfs cache: failed to find cache file")
} }
item.mu.Unlock()
o, err := operations.Copy(ctx, item.c.fremote, item.o, item.name, cacheObj) o, err := operations.Copy(ctx, item.c.fremote, item.o, item.name, cacheObj)
item.mu.Lock()
if err != nil { if err != nil {
return errors.Wrap(err, "vfs cache: failed to transfer file from cache to remote") return errors.Wrap(err, "vfs cache: failed to transfer file from cache to remote")
} }
item.o = o item.o = o
item._updateFingerprint() item._updateFingerprint()
item.info.Dirty = false
err = item._save()
if err != nil {
fs.Errorf(item.name, "Failed to write metadata file: %v", err)
}
if storeFn != nil && item.o != nil {
// Write the object back to the VFS layer as last
// thing we do with mutex unlocked
item.mu.Unlock()
storeFn(item.o)
item.mu.Lock()
}
return nil return nil
} }
// store stores the local cache file to the remote object with the item
// lock held, calling storeFn with the new remote object if set.
func (item *Item) store(ctx context.Context, storeFn StoreFn) (err error) {
item.mu.Lock()
defer item.mu.Unlock()
return item._store(ctx, storeFn)
}
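StoreFn itself is not declared in this excerpt; it is the callback invoked with the newly uploaded fs.Object once a store completes. A hypothetical caller in the VFS file layer might look like this (setObject is an assumed helper, used only for illustration):

err := item.Close(func(o fs.Object) {
	// update the directory entry with the freshly uploaded object
	file.setObject(o)
})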
// Close the cache file // Close the cache file
func (item *Item) Close(storeFn StoreFn) (err error) { func (item *Item) Close(storeFn StoreFn) (err error) {
defer log.Trace(item.o, "Item.Close")("err=%v", &err) defer log.Trace(item.o, "Item.Close")("err=%v", &err)
var ( var (
downloader *downloader downloader *downloader
o fs.Object syncWriteBack = item.c.opt.WriteBack <= 0
) )
// close downloader and set item with mutex unlocked // FIXME need to unlock to kill downloader - should we
// re-arrange locking so this isn't necessary? maybe
// downloader should use the item mutex for locking? or put a
// finer lock on Rs?
//
// close downloader with mutex unlocked
// downloader.Write calls ensure which needs the lock
defer func() { defer func() {
if downloader != nil { if downloader != nil {
closeErr := downloader.close(nil) closeErr := downloader.close(nil)
@ -486,9 +527,11 @@ func (item *Item) Close(storeFn StoreFn) (err error) {
err = closeErr err = closeErr
} }
} }
if err == nil && storeFn != nil && o != nil { // save the metadata once more since it may be dirty
// Write the object back to the VFS layer // after the downloader
storeFn(item.o) saveErr := item._save()
if saveErr != nil && err == nil {
err = errors.Wrap(saveErr, "close failed to save item")
} }
}() }()
item.mu.Lock() item.mu.Lock()
@ -533,15 +576,14 @@ func (item *Item) Close(storeFn StoreFn) (err error) {
// upload the file to backing store if changed // upload the file to backing store if changed
if item.info.Dirty { if item.info.Dirty {
fs.Debugf(item.name, "item changed - writeback") fs.Debugf(item.name, "item changed - writeback in %v", item.c.opt.WriteBack)
err = item._store() if syncWriteBack {
if err != nil { // do synchronous writeback
fs.Errorf(item.name, "%v", err) err = item._store(context.Background(), storeFn)
return err } else {
// asynchronous writeback
item.c.writeback.add(item, storeFn)
} }
fs.Debugf(item.o, "transferred to remote")
item.info.Dirty = false
o = item.o
} }
return err return err
@ -590,16 +632,6 @@ func (item *Item) _checkObject(o fs.Object) error {
return nil return nil
} }
// check the fingerprint of an object and update the item or delete
// the cached file accordingly.
//
// It ensures the file is the correct size for the object
func (item *Item) checkObject(o fs.Object) error {
item.mu.Lock()
defer item.mu.Unlock()
return item._checkObject(o)
}
// remove the cached file // remove the cached file
// //
// call with lock held // call with lock held
@ -760,6 +792,10 @@ func (item *Item) setModTime(modTime time.Time) {
item.mu.Lock() item.mu.Lock()
item._updateFingerprint() item._updateFingerprint()
item._setModTime(modTime) item._setModTime(modTime)
err := item._save()
if err != nil {
fs.Errorf(item.name, "vfs cache: setModTime: failed to save item info: %v", err)
}
item.mu.Unlock() item.mu.Unlock()
} }
@ -796,6 +832,10 @@ func (item *Item) WriteAt(b []byte, off int64) (n int, err error) {
if n > 0 { if n > 0 {
item._dirty() item._dirty()
} }
end := off + int64(n)
if end > item.info.Size {
item.info.Size = end
}
item.mu.Unlock() item.mu.Unlock()
return n, err return n, err
} }
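This size bookkeeping means a write past the current end of the cached file grows info.Size immediately rather than waiting for a flush. For example, with a freshly opened empty item:

n, _ := item.WriteAt([]byte("hello"), 10)
// n == 5; end = 10 + 5 = 15 > item.info.Size (0), so item.info.Size becomes 15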

vfs/vfscache/writeback.go (new file, 311 lines)
View File

@ -0,0 +1,311 @@
// This keeps track of the files which need to be written back
package vfscache
import (
"container/heap"
"context"
"sync"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/vfs/vfscommon"
)
const (
uploadDelay = 10 * time.Second // delay between upload attempts
maxUploadAttempts = 10 // max number of times to try to upload
)
// writeBack keeps track of the items which need to be written back to the disk at some point
type writeBack struct {
mu sync.Mutex
items writeBackItems // priority queue of *writeBackItem
lookup map[*Item]*writeBackItem // for getting a *writeBackItem from a *Item
opt *vfscommon.Options // VFS options
timer *time.Timer // next scheduled time for the uploader
kick chan struct{} // send on this channel to wake up the uploader
uploads int // number of uploads in progress
}
// make a new writeBack
//
// cancel the context to stop the background goroutine
func newWriteBack(ctx context.Context, opt *vfscommon.Options) *writeBack {
wb := &writeBack{
items: writeBackItems{},
lookup: make(map[*Item]*writeBackItem),
opt: opt,
timer: time.NewTimer(time.Second),
kick: make(chan struct{}, 1),
}
wb.timer.Stop()
heap.Init(&wb.items)
go wb.uploader(ctx)
return wb
}
// writeBackItem stores an Item awaiting writeback
//
// writeBack.mu must be held to manipulate this
type writeBackItem struct {
index int // index into the priority queue for update
item *Item // Item that needs writeback
expiry time.Time // When this expires we will write it back
uploading bool // If we are uploading the item
cancel context.CancelFunc // To cancel the upload with
storeFn StoreFn // To write the object back with
tries int // number of times we have tried to upload
delay time.Duration // delay between upload attempts
}
// A writeBackItems implements a priority queue by implementing
// heap.Interface and holds writeBackItems.
type writeBackItems []*writeBackItem
func (ws writeBackItems) Len() int { return len(ws) }
func (ws writeBackItems) Less(i, j int) bool {
return ws[i].expiry.Sub(ws[j].expiry) < 0
}
func (ws writeBackItems) Swap(i, j int) {
ws[i], ws[j] = ws[j], ws[i]
ws[i].index = i
ws[j].index = j
}
func (ws *writeBackItems) Push(x interface{}) {
n := len(*ws)
item := x.(*writeBackItem)
item.index = n
*ws = append(*ws, item)
}
func (ws *writeBackItems) Pop() interface{} {
old := *ws
n := len(old)
item := old[n-1]
old[n-1] = nil // avoid memory leak
item.index = -1 // for safety
*ws = old[0 : n-1]
return item
}
// update modifies the expiry of an Item in the queue.
//
// call with lock held
func (ws *writeBackItems) _update(item *writeBackItem, expiry time.Time) {
item.expiry = expiry
heap.Fix(ws, item.index)
}
// return a new expiry time from now plus the WriteBack delay
//
// call with lock held
func (wb *writeBack) _newExpiry() time.Time {
expiry := time.Now()
if wb.opt.WriteBack > 0 {
expiry = expiry.Add(wb.opt.WriteBack)
}
return expiry
}
// make a new writeBackItem
//
// call with the lock held
func (wb *writeBack) _newItem(item *Item) *writeBackItem {
wbItem := &writeBackItem{
item: item,
expiry: wb._newExpiry(),
delay: uploadDelay,
}
wb._addItem(wbItem)
wb._pushItem(wbItem)
return wbItem
}
// add a writeBackItem to the lookup map
//
// call with the lock held
func (wb *writeBack) _addItem(wbItem *writeBackItem) {
wb.lookup[wbItem.item] = wbItem
}
// delete a writeBackItem from the lookup map
//
// call with the lock held
func (wb *writeBack) _delItem(wbItem *writeBackItem) {
delete(wb.lookup, wbItem.item)
}
// pop a writeBackItem from the items heap
//
// call with the lock held
func (wb *writeBack) _popItem() (wbItem *writeBackItem) {
return heap.Pop(&wb.items).(*writeBackItem)
}
// push a writeBackItem onto the items heap
//
// call with the lock held
func (wb *writeBack) _pushItem(wbItem *writeBackItem) {
heap.Push(&wb.items, wbItem)
}
// peek the oldest writeBackItem - may be nil
//
// call with the lock held
func (wb *writeBack) _peekItem() (wbItem *writeBackItem) {
if len(wb.items) == 0 {
return nil
}
return wb.items[0]
}
// reset the timer which runs the expiries
func (wb *writeBack) _resetTimer() {
wbItem := wb._peekItem()
if wbItem == nil {
wb.timer.Stop()
} else {
dt := time.Until(wbItem.expiry)
if dt < 0 {
dt = 0
}
wb.timer.Reset(dt)
}
}
// add adds an item to the writeback queue or resets its timer if it
// is already there
func (wb *writeBack) add(item *Item, storeFn StoreFn) {
wb.mu.Lock()
defer wb.mu.Unlock()
wbItem, ok := wb.lookup[item]
if !ok {
wbItem = wb._newItem(item)
} else {
if wbItem.uploading {
// We are uploading already so cancel the upload
wb._cancelUpload(wbItem)
}
// Kick the timer on
wb.items._update(wbItem, wb._newExpiry())
}
wbItem.storeFn = storeFn
wb._resetTimer()
}
// kick the upload checker
//
// This should be called at the end of uploads just in case we had to
// pause uploads because the --transfers limit was reached
//
// call with the lock held
func (wb *writeBack) _kickUploader() {
select {
case wb.kick <- struct{}{}:
default:
}
}
// upload the item - called as a goroutine
func (wb *writeBack) upload(ctx context.Context, wbItem *writeBackItem) {
wb.mu.Lock()
defer wb.mu.Unlock()
item := wbItem.item
wbItem.tries++
wb.mu.Unlock()
err := item.store(ctx, wbItem.storeFn)
wb.mu.Lock()
wbItem.cancel() // cancel context to release resources since store done
if wbItem.uploading {
wbItem.uploading = false
wb.uploads--
}
if err != nil {
if wbItem.tries < maxUploadAttempts {
fs.Errorf(item.getName(), "vfs cache: failed to upload, will retry in %v: %v", wbItem.delay, err)
// push the item back on the queue for retry
wb._pushItem(wbItem)
wb.items._update(wbItem, time.Now().Add(wbItem.delay))
wbItem.delay *= 2
} else {
fs.Errorf(item.getName(), "vfs cache: failed to upload, giving up after %d attempts: %v", wbItem.tries, err)
}
} else {
// show that we are done with the item
wb._delItem(wbItem)
}
wb._kickUploader()
}
// cancel the upload
//
// call with lock held
func (wb *writeBack) _cancelUpload(wbItem *writeBackItem) {
if !wbItem.uploading {
return
}
fs.Debugf(wbItem.item.getName(), "vfs cache: canceling upload")
if wbItem.cancel != nil {
// Cancel the upload - this may or may not be effective
// we don't wait for the completion
wbItem.cancel()
}
if wbItem.uploading {
wbItem.uploading = false
wb.uploads--
}
// uploading items are not on the heap so add them back
wb._pushItem(wbItem)
}
// this uploads as many items as possible
func (wb *writeBack) processItems(ctx context.Context) {
wb.mu.Lock()
defer wb.mu.Unlock()
resetTimer := false
for wbItem := wb._peekItem(); wbItem != nil && time.Until(wbItem.expiry) <= 0; wbItem = wb._peekItem() {
// If reached transfer limit don't restart the timer
if wb.uploads >= fs.Config.Transfers {
fs.Debugf(wbItem.item.getName(), "vfs cache: delaying writeback as --transfers exceeded")
resetTimer = false
break
}
resetTimer = true
// Pop the item, mark as uploading and start the uploader
wbItem = wb._popItem()
wbItem.uploading = true
wb.uploads++
newCtx, cancel := context.WithCancel(ctx)
wbItem.cancel = cancel
go wb.upload(newCtx, wbItem)
}
if resetTimer {
wb._resetTimer()
}
}
// Looks for items which need writing back and write them back until
// the context is cancelled
func (wb *writeBack) uploader(ctx context.Context) {
for {
select {
case <-ctx.Done():
wb.timer.Stop()
return
case <-wb.timer.C:
wb.processItems(ctx)
case <-wb.kick:
wb.processItems(ctx)
}
}
}
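Putting the pieces together, the expected flow is roughly the following (the newWriteBack wiring inside the cache constructor is an assumption; the add call matches the Item.Close change earlier in this diff):

// assumed to happen once, when the cache is created
c.writeback = newWriteBack(ctx, &opt)

// from Item.Close, when the item is dirty and opt.WriteBack > 0
c.writeback.add(item, storeFn)

// ~opt.WriteBack later the uploader goroutine pops the item and calls
// item.store(ctx, storeFn); failures are retried with exponential backoff
// starting at uploadDelay, giving up after maxUploadAttempts tries.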

View File

@ -30,6 +30,7 @@ type Options struct {
CaseInsensitive bool CaseInsensitive bool
WriteWait time.Duration // time to wait for in-sequence write WriteWait time.Duration // time to wait for in-sequence write
ReadWait time.Duration // time to wait for in-sequence read ReadWait time.Duration // time to wait for in-sequence read
WriteBack time.Duration // time to wait before writing back dirty files
} }
// DefaultOpt is the default values uses for Opt // DefaultOpt is the default values uses for Opt
@ -54,4 +55,5 @@ var DefaultOpt = Options{
CaseInsensitive: runtime.GOOS == "windows" || runtime.GOOS == "darwin", // default to true on Windows and Mac, false otherwise CaseInsensitive: runtime.GOOS == "windows" || runtime.GOOS == "darwin", // default to true on Windows and Mac, false otherwise
WriteWait: 1000 * time.Millisecond, WriteWait: 1000 * time.Millisecond,
ReadWait: 20 * time.Millisecond, ReadWait: 20 * time.Millisecond,
WriteBack: 5 * time.Second,
} }
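As a rough illustration of what the new option controls when embedding the VFS (a sketch only - the vfs.New call and CacheMode value are assumptions, not part of this diff):

opt := vfscommon.DefaultOpt
opt.CacheMode = vfscommon.CacheModeWrites
opt.WriteBack = 10 * time.Second // wait 10s after the last close before uploading
v := vfs.New(f, &opt)            // f is an fs.Fs opened elsewhere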

View File

@ -35,5 +35,6 @@ func AddFlags(flagSet *pflag.FlagSet) {
flags.BoolVarP(flagSet, &Opt.CaseInsensitive, "vfs-case-insensitive", "", Opt.CaseInsensitive, "If a file name not found, find a case insensitive match.") flags.BoolVarP(flagSet, &Opt.CaseInsensitive, "vfs-case-insensitive", "", Opt.CaseInsensitive, "If a file name not found, find a case insensitive match.")
flags.DurationVarP(flagSet, &Opt.WriteWait, "vfs-write-wait", "", Opt.WriteWait, "Time to wait for in-sequence write before giving error.") flags.DurationVarP(flagSet, &Opt.WriteWait, "vfs-write-wait", "", Opt.WriteWait, "Time to wait for in-sequence write before giving error.")
flags.DurationVarP(flagSet, &Opt.ReadWait, "vfs-read-wait", "", Opt.ReadWait, "Time to wait for in-sequence read before seeking.") flags.DurationVarP(flagSet, &Opt.ReadWait, "vfs-read-wait", "", Opt.ReadWait, "Time to wait for in-sequence read before seeking.")
flags.DurationVarP(flagSet, &Opt.WriteBack, "vfs-writeback", "", Opt.WriteBack, "Time to writeback files after last use when using cache.")
platformFlags(flagSet) platformFlags(flagSet)
} }
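Usage note: with the flag registered above (its default corrected to Opt.WriteBack), a mount such as rclone mount remote: /mnt/remote --vfs-cache-mode writes --vfs-writeback 30s delays uploads for 30 seconds after the last close, while --vfs-writeback 0 keeps the synchronous upload-on-close path (the WriteBack <= 0 branch in Item.Close).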

View File

@ -46,17 +46,25 @@ var (
func RunTests(t *testing.T, useVFS bool, fn MountFn) { func RunTests(t *testing.T, useVFS bool, fn MountFn) {
mountFn = fn mountFn = fn
flag.Parse() flag.Parse()
cacheModes := []vfscommon.CacheMode{ tests := []struct {
vfscommon.CacheModeOff, cacheMode vfscommon.CacheMode
vfscommon.CacheModeMinimal, writeBack time.Duration
vfscommon.CacheModeWrites, }{
vfscommon.CacheModeFull, {cacheMode: vfscommon.CacheModeOff},
{cacheMode: vfscommon.CacheModeMinimal},
{cacheMode: vfscommon.CacheModeWrites},
{cacheMode: vfscommon.CacheModeFull},
{cacheMode: vfscommon.CacheModeFull, writeBack: 100 * time.Millisecond},
} }
run = newRun(useVFS) run = newRun(useVFS)
for _, cacheMode := range cacheModes { for _, test := range tests {
run.cacheMode(cacheMode) run.cacheMode(test.cacheMode, test.writeBack)
log.Printf("Starting test run with cache mode %v", cacheMode) what := fmt.Sprintf("CacheMode=%v", test.cacheMode)
ok := t.Run(fmt.Sprintf("CacheMode=%v", cacheMode), func(t *testing.T) { if test.writeBack > 0 {
what += fmt.Sprintf(",WriteBack=%v", test.writeBack)
}
log.Printf("Starting test run with %s", what)
ok := t.Run(what, func(t *testing.T) {
t.Run("TestTouchAndDelete", TestTouchAndDelete) t.Run("TestTouchAndDelete", TestTouchAndDelete)
t.Run("TestRenameOpenHandle", TestRenameOpenHandle) t.Run("TestRenameOpenHandle", TestRenameOpenHandle)
t.Run("TestDirLs", TestDirLs) t.Run("TestDirLs", TestDirLs)
@ -84,7 +92,7 @@ func RunTests(t *testing.T, useVFS bool, fn MountFn) {
t.Run("TestWriteFileDup", TestWriteFileDup) t.Run("TestWriteFileDup", TestWriteFileDup)
t.Run("TestWriteFileAppend", TestWriteFileAppend) t.Run("TestWriteFileAppend", TestWriteFileAppend)
}) })
log.Printf("Finished test run with cache mode %v (ok=%v)", cacheMode, ok) log.Printf("Finished test run with %s (ok=%v)", what, ok)
if !ok { if !ok {
break break
} }
@ -218,8 +226,8 @@ func (r *Run) umount() {
} }
} }
// cacheMode flushes the VFS and changes the CacheMode // cacheMode flushes the VFS and changes the CacheMode and the writeBack time
func (r *Run) cacheMode(cacheMode vfscommon.CacheMode) { func (r *Run) cacheMode(cacheMode vfscommon.CacheMode, writeBack time.Duration) {
if r.skip { if r.skip {
log.Printf("FUSE not found so skipping cacheMode") log.Printf("FUSE not found so skipping cacheMode")
return return
@ -239,6 +247,7 @@ func (r *Run) cacheMode(cacheMode vfscommon.CacheMode) {
} }
// Reset the cache mode // Reset the cache mode
r.vfs.SetCacheMode(cacheMode) r.vfs.SetCacheMode(cacheMode)
r.vfs.Opt.WriteBack = writeBack
// Flush the directory cache // Flush the directory cache
r.vfs.FlushDirCache() r.vfs.FlushDirCache()

View File

@ -17,21 +17,20 @@ import (
) )
// Open a file for write // Open a file for write
func writeHandleCreate(t *testing.T, r *fstest.Run) (*VFS, *WriteFileHandle) { func writeHandleCreate(t *testing.T) (r *fstest.Run, vfs *VFS, fh *WriteFileHandle, cleanup func()) {
vfs := New(r.Fremote, nil) r, vfs, cleanup = newTestVFS(t)
h, err := vfs.OpenFile("file1", os.O_WRONLY|os.O_CREATE, 0777) h, err := vfs.OpenFile("file1", os.O_WRONLY|os.O_CREATE, 0777)
require.NoError(t, err) require.NoError(t, err)
fh, ok := h.(*WriteFileHandle) fh, ok := h.(*WriteFileHandle)
require.True(t, ok) require.True(t, ok)
return vfs, fh return r, vfs, fh, cleanup
} }
func TestWriteFileHandleMethods(t *testing.T) { func TestWriteFileHandleMethods(t *testing.T) {
r := fstest.NewRun(t) r, vfs, fh, cleanup := writeHandleCreate(t)
defer r.Finalise() defer cleanup()
vfs, fh := writeHandleCreate(t, r)
// String // String
assert.Equal(t, "file1 (w)", fh.String()) assert.Equal(t, "file1 (w)", fh.String())
@ -133,9 +132,8 @@ func TestWriteFileHandleMethods(t *testing.T) {
} }
func TestWriteFileHandleWriteAt(t *testing.T) { func TestWriteFileHandleWriteAt(t *testing.T) {
r := fstest.NewRun(t) r, vfs, fh, cleanup := writeHandleCreate(t)
defer r.Finalise() defer cleanup()
vfs, fh := writeHandleCreate(t, r)
// Preconditions // Preconditions
assert.Equal(t, int64(0), fh.offset) assert.Equal(t, int64(0), fh.offset)
@ -179,9 +177,8 @@ func TestWriteFileHandleWriteAt(t *testing.T) {
} }
func TestWriteFileHandleFlush(t *testing.T) { func TestWriteFileHandleFlush(t *testing.T) {
r := fstest.NewRun(t) _, vfs, fh, cleanup := writeHandleCreate(t)
defer r.Finalise() defer cleanup()
vfs, fh := writeHandleCreate(t, r)
// Check Flush already creates file for unwritten handles, without closing it // Check Flush already creates file for unwritten handles, without closing it
err := fh.Flush() err := fh.Flush()
@ -213,9 +210,8 @@ func TestWriteFileHandleFlush(t *testing.T) {
} }
func TestWriteFileHandleRelease(t *testing.T) { func TestWriteFileHandleRelease(t *testing.T) {
r := fstest.NewRun(t) _, _, fh, cleanup := writeHandleCreate(t)
defer r.Finalise() defer cleanup()
_, fh := writeHandleCreate(t, r)
// Check Release closes file // Check Release closes file
err := fh.Release() err := fh.Release()
@ -262,12 +258,12 @@ func canSetModTime(t *testing.T, r *fstest.Run) bool {
// tests mod time on open files // tests mod time on open files
func TestWriteFileModTimeWithOpenWriters(t *testing.T) { func TestWriteFileModTimeWithOpenWriters(t *testing.T) {
r := fstest.NewRun(t) r, vfs, fh, cleanup := writeHandleCreate(t)
defer r.Finalise() defer cleanup()
if !canSetModTime(t, r) { if !canSetModTime(t, r) {
return t.Skip("can't set mod time")
} }
vfs, fh := writeHandleCreate(t, r)
mtime := time.Date(2012, time.November, 18, 17, 32, 31, 0, time.UTC) mtime := time.Date(2012, time.November, 18, 17, 32, 31, 0, time.UTC)
@ -290,9 +286,8 @@ func TestWriteFileModTimeWithOpenWriters(t *testing.T) {
} }
func testFileReadAt(t *testing.T, n int) { func testFileReadAt(t *testing.T, n int) {
r := fstest.NewRun(t) _, vfs, fh, cleanup := writeHandleCreate(t)
defer r.Finalise() defer cleanup()
vfs, fh := writeHandleCreate(t, r)
contents := []byte(random.String(n)) contents := []byte(random.String(n))
if n != 0 { if n != 0 {