From f78cd1e0434ee597fb8937ba3cda0d5fd58becfe Mon Sep 17 00:00:00 2001 From: Aleksandar Jankovic Date: Mon, 17 Jun 2019 10:34:30 +0200 Subject: [PATCH] Add context propagation to rclone - Change rclone/fs interfaces to accept context.Context - Update interface implementations to use context.Context - Change top level usage to propagate context to lower level functions Context propagation is needed for stopping transfers and passing other request-scoped values. --- backend/alias/alias_internal_test.go | 3 +- backend/amazonclouddrive/amazonclouddrive.go | 116 +++---- backend/azureblob/azureblob.go | 69 ++-- backend/b2/b2.go | 108 ++++--- backend/b2/upload.go | 9 +- backend/box/box.go | 134 ++++---- backend/cache/cache.go | 140 ++++----- backend/cache/cache_internal_test.go | 119 +++---- backend/cache/cache_upload_test.go | 47 +-- backend/cache/directory.go | 7 +- backend/cache/handle.go | 21 +- backend/cache/object.go | 65 ++-- backend/cache/storage_persistent.go | 5 +- backend/crypt/cipher.go | 27 +- backend/crypt/cipher_test.go | 17 +- backend/crypt/crypt.go | 127 ++++---- backend/drive/drive.go | 170 +++++----- backend/drive/drive_internal_test.go | 13 +- backend/dropbox/dropbox.go | 43 +-- backend/ftp/ftp.go | 42 +-- .../googlecloudstorage/googlecloudstorage.go | 52 ++-- backend/http/http.go | 27 +- backend/http/http_internal_test.go | 23 +- backend/jottacloud/jottacloud.go | 67 ++-- backend/koofr/koofr.go | 43 +-- backend/local/about_unix.go | 3 +- backend/local/about_windows.go | 3 +- backend/local/local.go | 41 +-- backend/local/local_internal_test.go | 12 +- backend/mega/mega.go | 57 ++-- backend/onedrive/onedrive.go | 154 ++++----- backend/opendrive/opendrive.go | 108 +++---- backend/pcloud/pcloud.go | 128 ++++---- backend/qingstor/qingstor.go | 51 +-- backend/s3/s3.go | 116 +++---- backend/sftp/sftp.go | 47 +-- backend/swift/swift.go | 53 ++-- backend/union/union.go | 49 +-- backend/webdav/webdav.go | 80 ++--- backend/yandex/yandex.go | 57 ++-- 
cmd/about/about.go | 3 +- cmd/cat/cat.go | 3 +- cmd/check/check.go | 6 +- cmd/cleanup/cleanup.go | 4 +- cmd/copy/copy.go | 6 +- cmd/copyto/copyto.go | 6 +- cmd/copyurl/copyurl.go | 4 +- cmd/cryptcheck/cryptcheck.go | 14 +- cmd/dbhashsum/dbhashsum.go | 3 +- cmd/dedupe/dedupe.go | 3 +- cmd/delete/delete.go | 4 +- cmd/deletefile/deletefile.go | 6 +- cmd/hashsum/hashsum.go | 3 +- cmd/info/info.go | 25 +- cmd/link/link.go | 3 +- cmd/ls/ls.go | 3 +- cmd/lsd/lsd.go | 3 +- cmd/lsf/lsf.go | 7 +- cmd/lsf/lsf_test.go | 41 +-- cmd/lsjson/lsjson.go | 3 +- cmd/lsl/lsl.go | 3 +- cmd/md5sum/md5sum.go | 3 +- cmd/memtest/memtest.go | 6 +- cmd/mkdir/mkdir.go | 4 +- cmd/mountlib/mounttest/dir.go | 5 +- cmd/mountlib/mounttest/fs.go | 7 +- cmd/move/move.go | 6 +- cmd/moveto/moveto.go | 6 +- cmd/ncdu/ncdu.go | 8 +- cmd/ncdu/scan/scan.go | 5 +- cmd/purge/purge.go | 4 +- cmd/rc/rc.go | 3 +- cmd/rcat/rcat.go | 3 +- cmd/rmdir/rmdir.go | 4 +- cmd/rmdirs/rmdirs.go | 4 +- cmd/serve/dlna/dlna_test.go | 3 +- cmd/serve/ftp/ftp_test.go | 3 +- cmd/serve/http/http.go | 2 +- cmd/serve/httplib/serve/serve.go | 4 +- cmd/serve/restic/restic.go | 16 +- cmd/serve/restic/restic_test.go | 3 +- cmd/serve/sftp/connection.go | 9 +- cmd/serve/sftp/sftp_test.go | 3 +- cmd/serve/webdav/webdav.go | 4 +- cmd/serve/webdav/webdav_test.go | 3 +- cmd/settier/settier.go | 4 +- cmd/sha1sum/sha1sum.go | 3 +- cmd/size/size.go | 3 +- cmd/sync/sync.go | 4 +- cmd/touch/touch.go | 11 +- cmd/touch/touch_test.go | 23 +- cmd/tree/tree.go | 5 +- fs/accounting/stats.go | 3 +- fs/accounting/token_bucket.go | 2 +- fs/chunkedreader/chunkedreader.go | 17 +- fs/chunkedreader/chunkedreader_test.go | 15 +- fs/config/rc.go | 18 +- fs/config/rc_test.go | 17 +- fs/dir.go | 11 +- fs/filter/filter.go | 19 +- fs/filter/filter_test.go | 15 +- fs/fs.go | 83 ++--- fs/fs_test.go | 7 +- fs/list/list.go | 13 +- fs/list/list_test.go | 21 +- fs/march/march.go | 10 +- fs/mimetype.go | 9 +- fs/object/object.go | 43 +-- fs/object/object_test.go | 51 +-- 
fs/operations/dedupe.go | 61 ++-- fs/operations/dedupe_test.go | 75 ++--- fs/operations/listdirsorted_test.go | 31 +- fs/operations/lsjson.go | 11 +- fs/operations/multithread.go | 14 +- fs/operations/multithread_test.go | 9 +- fs/operations/operations.go | 294 +++++++++--------- fs/operations/operations_test.go | 201 ++++++------ fs/operations/rc.go | 51 +-- fs/operations/rc_test.go | 81 ++--- fs/operations/reopen.go | 7 +- fs/operations/reopen_test.go | 7 +- fs/rc/config.go | 8 +- fs/rc/config_test.go | 13 +- fs/rc/internal.go | 17 +- fs/rc/internal_test.go | 17 +- fs/rc/job.go | 26 +- fs/rc/job_test.go | 18 +- fs/rc/rcserver/rcserver.go | 6 +- fs/rc/registry.go | 3 +- fs/sync/rc.go | 14 +- fs/sync/rc_test.go | 25 +- fs/sync/sync.go | 70 ++--- fs/sync/sync_test.go | 241 +++++++------- fs/walk/walk.go | 69 ++-- fs/walk/walk_test.go | 43 +-- fstest/fstest.go | 17 +- fstest/fstests/fstests.go | 145 ++++----- fstest/mockfs/mockfs.go | 11 +- fstest/mockobject/mockobject.go | 15 +- fstest/run.go | 36 ++- fstest/test_all/clean.go | 5 +- lib/dircache/dircache.go | 31 +- vfs/dir.go | 21 +- vfs/dir_handle_test.go | 7 +- vfs/dir_test.go | 13 +- vfs/file.go | 41 +-- vfs/file_test.go | 3 +- vfs/rc.go | 7 +- vfs/read.go | 9 +- vfs/read_test.go | 3 +- vfs/read_write.go | 9 +- vfs/read_write_test.go | 3 +- vfs/vfs.go | 4 +- vfs/vfs_test.go | 15 +- vfs/write.go | 3 +- vfs/write_test.go | 9 +- 156 files changed, 2570 insertions(+), 2380 deletions(-) diff --git a/backend/alias/alias_internal_test.go b/backend/alias/alias_internal_test.go index 57ac2059d..309de13f1 100644 --- a/backend/alias/alias_internal_test.go +++ b/backend/alias/alias_internal_test.go @@ -1,6 +1,7 @@ package alias import ( + "context" "fmt" "path" "path/filepath" @@ -69,7 +70,7 @@ func TestNewFS(t *testing.T) { prepare(t, remoteRoot) f, err := fs.NewFs(fmt.Sprintf("%s:%s", remoteName, test.fsRoot)) require.NoError(t, err, what) - gotEntries, err := f.List(test.fsList) + gotEntries, err := 
f.List(context.Background(), test.fsList) require.NoError(t, err, what) sort.Sort(gotEntries) diff --git a/backend/amazonclouddrive/amazonclouddrive.go b/backend/amazonclouddrive/amazonclouddrive.go index 9eafc6f08..cbdf3b2f1 100644 --- a/backend/amazonclouddrive/amazonclouddrive.go +++ b/backend/amazonclouddrive/amazonclouddrive.go @@ -12,6 +12,7 @@ we ignore assets completely! */ import ( + "context" "encoding/json" "fmt" "io" @@ -246,6 +247,7 @@ func filterRequest(req *http.Request) { // NewFs constructs an Fs from the path, container:path func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { + ctx := context.Background() // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) @@ -307,7 +309,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { f.dirCache = dircache.New(root, f.trueRootID, f) // Find the current root - err = f.dirCache.FindRoot(false) + err = f.dirCache.FindRoot(ctx, false) if err != nil { // Assume it is a file newRoot, remote := dircache.SplitPath(root) @@ -315,12 +317,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { tempF.dirCache = dircache.New(newRoot, f.trueRootID, &tempF) tempF.root = newRoot // Make new Fs which is the parent - err = tempF.dirCache.FindRoot(false) + err = tempF.dirCache.FindRoot(ctx, false) if err != nil { // No root so return old f return f, nil } - _, err := tempF.newObjectWithInfo(remote, nil) + _, err := tempF.newObjectWithInfo(ctx, remote, nil) if err != nil { if err == fs.ErrorObjectNotFound { // File doesn't exist so return old f @@ -352,7 +354,7 @@ func (f *Fs) getRootInfo() (rootInfo *acd.Folder, err error) { // Return an Object from a path // // If it can't be found it returns the error fs.ErrorObjectNotFound. 
-func (f *Fs) newObjectWithInfo(remote string, info *acd.Node) (fs.Object, error) { +func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *acd.Node) (fs.Object, error) { o := &Object{ fs: f, remote: remote, @@ -361,7 +363,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *acd.Node) (fs.Object, error) // Set info but not meta o.info = info } else { - err := o.readMetaData() // reads info and meta, returning an error + err := o.readMetaData(ctx) // reads info and meta, returning an error if err != nil { return nil, err } @@ -371,12 +373,12 @@ func (f *Fs) newObjectWithInfo(remote string, info *acd.Node) (fs.Object, error) // NewObject finds the Object at remote. If it can't be found // it returns the error fs.ErrorObjectNotFound. -func (f *Fs) NewObject(remote string) (fs.Object, error) { - return f.newObjectWithInfo(remote, nil) +func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { + return f.newObjectWithInfo(ctx, remote, nil) } // FindLeaf finds a directory of name leaf in the folder with ID pathID -func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) { +func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) { //fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf) folder := acd.FolderFromId(pathID, f.c.Nodes) var resp *http.Response @@ -403,7 +405,7 @@ func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err er } // CreateDir makes a directory with pathID as parent and name leaf -func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) { +func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) { //fmt.Printf("CreateDir(%q, %q)\n", pathID, leaf) folder := acd.FolderFromId(pathID, f.c.Nodes) var resp *http.Response @@ -501,12 +503,12 @@ func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly // // This should return ErrDirNotFound if the directory 
isn't // found. -func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { - err = f.dirCache.FindRoot(false) +func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { + err = f.dirCache.FindRoot(ctx, false) if err != nil { return nil, err } - directoryID, err := f.dirCache.FindDir(dir, false) + directoryID, err := f.dirCache.FindDir(ctx, dir, false) if err != nil { return nil, err } @@ -524,7 +526,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { d := fs.NewDir(remote, when).SetID(*node.Id) entries = append(entries, d) case fileKind: - o, err := f.newObjectWithInfo(remote, node) + o, err := f.newObjectWithInfo(ctx, remote, node) if err != nil { iErr = err return true @@ -568,7 +570,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { // At the end of large uploads. The speculation is that the timeout // is waiting for the sha1 hashing to complete and the file may well // be properly uploaded. -func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, inInfo *acd.File, inErr error, uploadTime time.Duration) (fixedError bool, info *acd.File, err error) { +func (f *Fs) checkUpload(ctx context.Context, resp *http.Response, in io.Reader, src fs.ObjectInfo, inInfo *acd.File, inErr error, uploadTime time.Duration) (fixedError bool, info *acd.File, err error) { // Return if no error - all is well if inErr == nil { return false, inInfo, inErr @@ -608,7 +610,7 @@ func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, i fs.Debugf(src, "Error detected after finished upload - waiting to see if object was uploaded correctly: %v (%q)", inErr, httpStatus) remote := src.Remote() for i := 1; i <= retries; i++ { - o, err := f.NewObject(remote) + o, err := f.NewObject(ctx, remote) if err == fs.ErrorObjectNotFound { fs.Debugf(src, "Object not found - waiting (%d/%d)", i, retries) } else if err != nil { @@ -634,7 +636,7 @@ func (f *Fs) checkUpload(resp 
*http.Response, in io.Reader, src fs.ObjectInfo, i // Copy the reader in to the new object which is returned // // The new object may have been created if an error is returned -func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { remote := src.Remote() size := src.Size() // Temporary Object under construction @@ -643,17 +645,17 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs. remote: remote, } // Check if object already exists - err := o.readMetaData() + err := o.readMetaData(ctx) switch err { case nil: - return o, o.Update(in, src, options...) + return o, o.Update(ctx, in, src, options...) case fs.ErrorObjectNotFound: // Not found so create it default: return nil, err } // If not create it - leaf, directoryID, err := f.dirCache.FindRootAndPath(remote, true) + leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true) if err != nil { return nil, err } @@ -669,7 +671,7 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs. info, resp, err = folder.Put(in, leaf) f.tokenRenewer.Stop() var ok bool - ok, info, err = f.checkUpload(resp, in, src, info, err, time.Since(start)) + ok, info, err = f.checkUpload(ctx, resp, in, src, info, err, time.Since(start)) if ok { return false, nil } @@ -683,13 +685,13 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs. 
} // Mkdir creates the container if it doesn't exist -func (f *Fs) Mkdir(dir string) error { - err := f.dirCache.FindRoot(true) +func (f *Fs) Mkdir(ctx context.Context, dir string) error { + err := f.dirCache.FindRoot(ctx, true) if err != nil { return err } if dir != "" { - _, err = f.dirCache.FindDir(dir, true) + _, err = f.dirCache.FindDir(ctx, dir, true) } return err } @@ -703,7 +705,7 @@ func (f *Fs) Mkdir(dir string) error { // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantMove -func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { +func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { // go test -v -run '^Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|FsMove)$' srcObj, ok := src.(*Object) if !ok { @@ -712,15 +714,15 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { } // create the destination directory if necessary - err := f.dirCache.FindRoot(true) + err := f.dirCache.FindRoot(ctx, true) if err != nil { return nil, err } - srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(srcObj.remote, false) + srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false) if err != nil { return nil, err } - dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(remote, true) + dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, remote, true) if err != nil { return nil, err } @@ -736,12 +738,12 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { srcErr, dstErr error ) for i := 1; i <= fs.Config.LowLevelRetries; i++ { - _, srcErr = srcObj.fs.NewObject(srcObj.remote) // try reading the object + _, srcErr = srcObj.fs.NewObject(ctx, srcObj.remote) // try reading the object if srcErr != nil && srcErr != fs.ErrorObjectNotFound { // exit if error on source return nil, srcErr } - dstObj, dstErr = f.NewObject(remote) + dstObj, dstErr = f.NewObject(ctx, remote) if dstErr != nil && dstErr 
!= fs.ErrorObjectNotFound { // exit if error on dst return nil, dstErr @@ -770,7 +772,7 @@ func (f *Fs) DirCacheFlush() { // If it isn't possible then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists -func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) { +func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) { srcFs, ok := src.(*Fs) if !ok { fs.Debugf(src, "DirMove error: not same remote type") @@ -786,14 +788,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) { } // find the root src directory - err = srcFs.dirCache.FindRoot(false) + err = srcFs.dirCache.FindRoot(ctx, false) if err != nil { return err } // find the root dst directory if dstRemote != "" { - err = f.dirCache.FindRoot(true) + err = f.dirCache.FindRoot(ctx, true) if err != nil { return err } @@ -808,14 +810,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) { if dstRemote == "" { findPath = f.root } - dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(findPath, true) + dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, findPath, true) if err != nil { return err } // Check destination does not exist if dstRemote != "" { - _, err = f.dirCache.FindDir(dstRemote, false) + _, err = f.dirCache.FindDir(ctx, dstRemote, false) if err == fs.ErrorDirNotFound { // OK } else if err != nil { @@ -831,7 +833,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) { if srcRemote == "" { srcDirectoryID, err = srcFs.dirCache.RootParentID() } else { - _, srcDirectoryID, err = srcFs.dirCache.FindPath(findPath, false) + _, srcDirectoryID, err = srcFs.dirCache.FindPath(ctx, findPath, false) } if err != nil { return err @@ -839,7 +841,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) { srcLeaf, _ := dircache.SplitPath(srcPath) // Find ID of src - srcID, err := srcFs.dirCache.FindDir(srcRemote, false) + srcID, err := 
srcFs.dirCache.FindDir(ctx, srcRemote, false) if err != nil { return err } @@ -872,17 +874,17 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) { // purgeCheck remotes the root directory, if check is set then it // refuses to do so if it has anything in -func (f *Fs) purgeCheck(dir string, check bool) error { +func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error { root := path.Join(f.root, dir) if root == "" { return errors.New("can't purge root directory") } dc := f.dirCache - err := dc.FindRoot(false) + err := dc.FindRoot(ctx, false) if err != nil { return err } - rootID, err := dc.FindDir(dir, false) + rootID, err := dc.FindDir(ctx, dir, false) if err != nil { return err } @@ -931,8 +933,8 @@ func (f *Fs) purgeCheck(dir string, check bool) error { // Rmdir deletes the root folder // // Returns an error if it isn't empty -func (f *Fs) Rmdir(dir string) error { - return f.purgeCheck(dir, true) +func (f *Fs) Rmdir(ctx context.Context, dir string) error { + return f.purgeCheck(ctx, dir, true) } // Precision return the precision of this Fs @@ -954,7 +956,7 @@ func (f *Fs) Hashes() hash.Set { // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy -//func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { +//func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { // srcObj, ok := src.(*Object) // if !ok { // fs.Debugf(src, "Can't copy - not same remote type") @@ -965,7 +967,7 @@ func (f *Fs) Hashes() hash.Set { // if err != nil { // return nil, err // } -// return f.NewObject(remote), nil +// return f.NewObject(ctx, remote), nil //} // Purge deletes all the files and the container @@ -973,8 +975,8 @@ func (f *Fs) Hashes() hash.Set { // Optional interface: Only implement this if you have a way of // deleting all the files quicker than just running Remove() on the // result of List() -func (f *Fs) Purge() error { - return 
f.purgeCheck("", false) +func (f *Fs) Purge(ctx context.Context) error { + return f.purgeCheck(ctx, "", false) } // ------------------------------------------------------------ @@ -998,7 +1000,7 @@ func (o *Object) Remote() string { } // Hash returns the Md5sum of an object returning a lowercase hex string -func (o *Object) Hash(t hash.Type) (string, error) { +func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { if t != hash.MD5 { return "", hash.ErrUnsupported } @@ -1021,11 +1023,11 @@ func (o *Object) Size() int64 { // it also sets the info // // If it can't be found it returns the error fs.ErrorObjectNotFound. -func (o *Object) readMetaData() (err error) { +func (o *Object) readMetaData(ctx context.Context) (err error) { if o.info != nil { return nil } - leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(o.remote, false) + leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, o.remote, false) if err != nil { if err == fs.ErrorDirNotFound { return fs.ErrorObjectNotFound @@ -1054,8 +1056,8 @@ func (o *Object) readMetaData() (err error) { // // It attempts to read the objects mtime and if that isn't present the // LastModified returned in the http headers -func (o *Object) ModTime() time.Time { - err := o.readMetaData() +func (o *Object) ModTime(ctx context.Context) time.Time { + err := o.readMetaData(ctx) if err != nil { fs.Debugf(o, "Failed to read metadata: %v", err) return time.Now() @@ -1069,7 +1071,7 @@ func (o *Object) ModTime() time.Time { } // SetModTime sets the modification time of the local fs object -func (o *Object) SetModTime(modTime time.Time) error { +func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { // FIXME not implemented return fs.ErrorCantSetModTime } @@ -1080,7 +1082,7 @@ func (o *Object) Storable() bool { } // Open an object for read -func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { +func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) 
(in io.ReadCloser, err error) { bigObject := o.Size() >= int64(o.fs.opt.TempLinkThreshold) if bigObject { fs.Debugf(o, "Downloading large object via tempLink") @@ -1102,7 +1104,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { // Update the object with the contents of the io.Reader, modTime and size // // The new object may have been created if an error is returned -func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { +func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { file := acd.File{Node: o.info} var info *acd.File var resp *http.Response @@ -1113,7 +1115,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio info, resp, err = file.Overwrite(in) o.fs.tokenRenewer.Stop() var ok bool - ok, info, err = o.fs.checkUpload(resp, in, src, info, err, time.Since(start)) + ok, info, err = o.fs.checkUpload(ctx, resp, in, src, info, err, time.Since(start)) if ok { return false, nil } @@ -1138,7 +1140,7 @@ func (f *Fs) removeNode(info *acd.Node) error { } // Remove an object -func (o *Object) Remove() error { +func (o *Object) Remove(ctx context.Context) error { return o.fs.removeNode(o.info) } @@ -1260,7 +1262,7 @@ OnConflict: } // MimeType of an Object if known, "" otherwise -func (o *Object) MimeType() string { +func (o *Object) MimeType(ctx context.Context) string { if o.info.ContentProperties != nil && o.info.ContentProperties.ContentType != nil { return *o.info.ContentProperties.ContentType } @@ -1273,7 +1275,7 @@ func (o *Object) MimeType() string { // Automatically restarts itself in case of unexpected behaviour of the remote. // // Close the returned channel to stop being notified. 
-func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) { +func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) { checkpoint := f.opt.Checkpoint go func() { diff --git a/backend/azureblob/azureblob.go b/backend/azureblob/azureblob.go index 8adf4efd8..c4c73c5c3 100644 --- a/backend/azureblob/azureblob.go +++ b/backend/azureblob/azureblob.go @@ -309,6 +309,7 @@ func (f *Fs) newPipeline(c azblob.Credential, o azblob.PipelineOptions) pipeline // NewFs constructs an Fs from the path, container:path func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { + ctx := context.Background() // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) @@ -415,7 +416,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { } else { f.root += "/" } - _, err := f.NewObject(remote) + _, err := f.NewObject(ctx, remote) if err != nil { if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile { // File doesn't exist or is a directory so return old f @@ -454,7 +455,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *azblob.BlobItem) (fs.Object, // NewObject finds the Object at remote. If it can't be found // it returns the error fs.ErrorObjectNotFound. 
-func (f *Fs) NewObject(remote string) (fs.Object, error) { +func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { return f.newObjectWithInfo(remote, nil) } @@ -496,7 +497,7 @@ type listFn func(remote string, object *azblob.BlobItem, isDirectory bool) error // the container and root supplied // // dir is the starting directory, "" for root -func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error { +func (f *Fs) list(ctx context.Context, dir string, recurse bool, maxResults uint, fn listFn) error { f.containerOKMu.Lock() deleted := f.containerDeleted f.containerOKMu.Unlock() @@ -523,7 +524,6 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error { Prefix: root, MaxResults: int32(maxResults), } - ctx := context.Background() directoryMarkers := map[string]struct{}{} for marker := (azblob.Marker{}); marker.NotDone(); { var response *azblob.ListBlobsHierarchySegmentResponse @@ -621,8 +621,8 @@ func (f *Fs) markContainerOK() { } // listDir lists a single directory -func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) { - err = f.list(dir, false, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error { +func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) { + err = f.list(ctx, dir, false, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error { entry, err := f.itemToDirEntry(remote, object, isDirectory) if err != nil { return err @@ -665,11 +665,11 @@ func (f *Fs) listContainers(dir string) (entries fs.DirEntries, err error) { // // This should return ErrDirNotFound if the directory isn't // found. 
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { +func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { if f.container == "" { return f.listContainers(dir) } - return f.listDir(dir) + return f.listDir(ctx, dir) } // ListR lists the objects and directories of the Fs starting @@ -688,12 +688,12 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { // // Don't implement this unless you have a more efficient way // of listing recursively that doing a directory traversal. -func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) { +func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { if f.container == "" { return fs.ErrorListBucketRequired } list := walk.NewListRHelper(callback) - err = f.list(dir, true, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error { + err = f.list(ctx, dir, true, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error { entry, err := f.itemToDirEntry(remote, object, isDirectory) if err != nil { return err @@ -745,13 +745,13 @@ func (f *Fs) listContainersToFn(fn listContainerFn) error { // Copy the reader in to the new object which is returned // // The new object may have been created if an error is returned -func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { // Temporary Object under construction fs := &Object{ fs: f, remote: src.Remote(), } - return fs, fs.Update(in, src, options...) + return fs, fs.Update(ctx, in, src, options...) 
} // Check if the container exists @@ -784,7 +784,7 @@ func (f *Fs) dirExists() (bool, error) { } // Mkdir creates the container if it doesn't exist -func (f *Fs) Mkdir(dir string) error { +func (f *Fs) Mkdir(ctx context.Context, dir string) error { f.containerOKMu.Lock() defer f.containerOKMu.Unlock() if f.containerOK { @@ -831,9 +831,9 @@ func (f *Fs) Mkdir(dir string) error { } // isEmpty checks to see if a given directory is empty and returns an error if not -func (f *Fs) isEmpty(dir string) (err error) { +func (f *Fs) isEmpty(ctx context.Context, dir string) (err error) { empty := true - err = f.list(dir, true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error { + err = f.list(ctx, dir, true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error { empty = false return nil }) @@ -880,8 +880,8 @@ func (f *Fs) deleteContainer() error { // Rmdir deletes the container if the fs is at the root // // Returns an error if it isn't empty -func (f *Fs) Rmdir(dir string) error { - err := f.isEmpty(dir) +func (f *Fs) Rmdir(ctx context.Context, dir string) error { + err := f.isEmpty(ctx, dir) if err != nil { return err } @@ -902,7 +902,7 @@ func (f *Fs) Hashes() hash.Set { } // Purge deletes all the files and directories including the old versions. -func (f *Fs) Purge() error { +func (f *Fs) Purge(ctx context.Context) error { dir := "" // forward compat! 
if f.root != "" || dir != "" { // Delegate to caller if not root container @@ -920,8 +920,8 @@ func (f *Fs) Purge() error { // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy -func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { - err := f.Mkdir("") +func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { + err := f.Mkdir(ctx, "") if err != nil { return nil, err } @@ -939,7 +939,6 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { } options := azblob.BlobAccessConditions{} - ctx := context.Background() var startCopy *azblob.BlobStartCopyFromURLResponse err = f.pacer.Call(func() (bool, error) { @@ -960,7 +959,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { copyStatus = getMetadata.CopyStatus() } - return f.NewObject(remote) + return f.NewObject(ctx, remote) } // ------------------------------------------------------------ @@ -984,7 +983,7 @@ func (o *Object) Remote() string { } // Hash returns the MD5 of an object returning a lowercase hex string -func (o *Object) Hash(t hash.Type) (string, error) { +func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { if t != hash.MD5 { return "", hash.ErrUnsupported } @@ -1124,14 +1123,14 @@ func (o *Object) parseTimeString(timeString string) (err error) { // // It attempts to read the objects mtime and if that isn't present the // LastModified returned in the http headers -func (o *Object) ModTime() (result time.Time) { +func (o *Object) ModTime(ctx context.Context) (result time.Time) { // The error is logged in readMetaData _ = o.readMetaData() return o.modTime } // SetModTime sets the modification time of the local fs object -func (o *Object) SetModTime(modTime time.Time) error { +func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { // Make sure o.meta is not nil if o.meta == nil { o.meta = make(map[string]string, 1) @@ -1140,7 
+1139,6 @@ func (o *Object) SetModTime(modTime time.Time) error { o.meta[modTimeKey] = modTime.Format(timeFormatOut) blob := o.getBlobReference() - ctx := context.Background() err := o.fs.pacer.Call(func() (bool, error) { _, err := blob.SetMetadata(ctx, o.meta, azblob.BlobAccessConditions{}) return o.fs.shouldRetry(err) @@ -1158,7 +1156,7 @@ func (o *Object) Storable() bool { } // Open an object for read -func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { +func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { // Offset and Count for range download var offset int64 var count int64 @@ -1182,7 +1180,6 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { } } blob := o.getBlobReference() - ctx := context.Background() ac := azblob.BlobAccessConditions{} var dowloadResponse *azblob.DownloadResponse err = o.fs.pacer.Call(func() (bool, error) { @@ -1371,26 +1368,26 @@ outer: // Update the object with the contents of the io.Reader, modTime and size // // The new object may have been created if an error is returned -func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { - err = o.fs.Mkdir("") +func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { + err = o.fs.Mkdir(ctx, "") if err != nil { return err } size := src.Size() // Update Mod time - o.updateMetadataWithModTime(src.ModTime()) + o.updateMetadataWithModTime(src.ModTime(ctx)) if err != nil { return err } blob := o.getBlobReference() httpHeaders := azblob.BlobHTTPHeaders{} - httpHeaders.ContentType = fs.MimeType(o) + httpHeaders.ContentType = fs.MimeType(ctx, o) // Compute the Content-MD5 of the file, for multiparts uploads it // will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header // Note: If multipart, a MD5 checksum will also be computed for each uploaded block // in order to validate 
its integrity during transport - if sourceMD5, _ := src.Hash(hash.MD5); sourceMD5 != "" { + if sourceMD5, _ := src.Hash(ctx, hash.MD5); sourceMD5 != "" { sourceMD5bytes, err := hex.DecodeString(sourceMD5) if err == nil { httpHeaders.ContentMD5 = sourceMD5bytes @@ -1415,7 +1412,6 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio fs.Debugf(o, "Setting multipart upload for file of chunk size (%d) to work around SDK bug", size) } - ctx := context.Background() // Don't retry, return a retry error instead err = o.fs.pacer.CallNoRetry(func() (bool, error) { if multipartUpload { @@ -1448,11 +1444,10 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio } // Remove an object -func (o *Object) Remove() error { +func (o *Object) Remove(ctx context.Context) error { blob := o.getBlobReference() snapShotOptions := azblob.DeleteSnapshotsOptionNone ac := azblob.BlobAccessConditions{} - ctx := context.Background() return o.fs.pacer.Call(func() (bool, error) { _, err := blob.Delete(ctx, snapShotOptions, ac) return o.fs.shouldRetry(err) @@ -1460,7 +1455,7 @@ func (o *Object) Remove() error { } // MimeType of an Object if known, "" otherwise -func (o *Object) MimeType() string { +func (o *Object) MimeType(ctx context.Context) string { return o.mimeType } diff --git a/backend/b2/b2.go b/backend/b2/b2.go index fffa5a6c3..9bbcbfa50 100644 --- a/backend/b2/b2.go +++ b/backend/b2/b2.go @@ -7,6 +7,7 @@ package b2 import ( "bufio" "bytes" + "context" "crypto/sha1" "fmt" gohash "hash" @@ -324,6 +325,7 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) { // NewFs constructs an Fs from the path, bucket:path func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { + ctx := context.Background() // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) @@ -398,7 +400,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { } else { f.root += "/" } - 
_, err := f.NewObject(remote) + _, err := f.NewObject(ctx, remote) if err != nil { if err == fs.ErrorObjectNotFound { // File doesn't exist so return old f @@ -516,7 +518,7 @@ func (f *Fs) putUploadBlock(buf []byte) { // Return an Object from a path // // If it can't be found it returns the error fs.ErrorObjectNotFound. -func (f *Fs) newObjectWithInfo(remote string, info *api.File) (fs.Object, error) { +func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.File) (fs.Object, error) { o := &Object{ fs: f, remote: remote, @@ -527,7 +529,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.File) (fs.Object, error) return nil, err } } else { - err := o.readMetaData() // reads info and headers, returning an error + err := o.readMetaData(ctx) // reads info and headers, returning an error if err != nil { return nil, err } @@ -537,8 +539,8 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.File) (fs.Object, error) // NewObject finds the Object at remote. If it can't be found // it returns the error fs.ErrorObjectNotFound. -func (f *Fs) NewObject(remote string) (fs.Object, error) { - return f.newObjectWithInfo(remote, nil) +func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { + return f.newObjectWithInfo(ctx, remote, nil) } // listFn is called from list to handle an object @@ -562,7 +564,7 @@ var errEndList = errors.New("end list") // than 1000) // // If hidden is set then it will list the hidden (deleted) files too. 
-func (f *Fs) list(dir string, recurse bool, prefix string, limit int, hidden bool, fn listFn) error { +func (f *Fs) list(ctx context.Context, dir string, recurse bool, prefix string, limit int, hidden bool, fn listFn) error { root := f.root if dir != "" { root += dir + "/" @@ -643,7 +645,7 @@ func (f *Fs) list(dir string, recurse bool, prefix string, limit int, hidden boo } // Convert a list item into a DirEntry -func (f *Fs) itemToDirEntry(remote string, object *api.File, isDirectory bool, last *string) (fs.DirEntry, error) { +func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *api.File, isDirectory bool, last *string) (fs.DirEntry, error) { if isDirectory { d := fs.NewDir(remote, time.Time{}) return d, nil @@ -657,7 +659,7 @@ func (f *Fs) itemToDirEntry(remote string, object *api.File, isDirectory bool, l if object.Action == "hide" { return nil, nil } - o, err := f.newObjectWithInfo(remote, object) + o, err := f.newObjectWithInfo(ctx, remote, object) if err != nil { return nil, err } @@ -674,10 +676,10 @@ func (f *Fs) markBucketOK() { } // listDir lists a single directory -func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) { +func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) { last := "" - err = f.list(dir, false, "", 0, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error { - entry, err := f.itemToDirEntry(remote, object, isDirectory, &last) + err = f.list(ctx, dir, false, "", 0, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error { + entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last) if err != nil { return err } @@ -719,11 +721,11 @@ func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) { // // This should return ErrDirNotFound if the directory isn't // found. 
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { +func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { if f.bucket == "" { return f.listBuckets(dir) } - return f.listDir(dir) + return f.listDir(ctx, dir) } // ListR lists the objects and directories of the Fs starting @@ -742,14 +744,14 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { // // Don't implement this unless you have a more efficient way // of listing recursively that doing a directory traversal. -func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) { +func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { if f.bucket == "" { return fs.ErrorListBucketRequired } list := walk.NewListRHelper(callback) last := "" - err = f.list(dir, true, "", 0, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error { - entry, err := f.itemToDirEntry(remote, object, isDirectory, &last) + err = f.list(ctx, dir, true, "", 0, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error { + entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last) if err != nil { return err } @@ -834,22 +836,22 @@ func (f *Fs) clearBucketID() { // Copy the reader in to the new object which is returned // // The new object may have been created if an error is returned -func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { // Temporary Object under construction fs := &Object{ fs: f, remote: src.Remote(), } - return fs, fs.Update(in, src, options...) + return fs, fs.Update(ctx, in, src, options...) 
} // PutStream uploads to the remote path with the modTime given of indeterminate size -func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { - return f.Put(in, src, options...) +func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + return f.Put(ctx, in, src, options...) } // Mkdir creates the bucket if it doesn't exist -func (f *Fs) Mkdir(dir string) error { +func (f *Fs) Mkdir(ctx context.Context, dir string) error { f.bucketOKMu.Lock() defer f.bucketOKMu.Unlock() if f.bucketOK { @@ -895,7 +897,7 @@ func (f *Fs) Mkdir(dir string) error { // Rmdir deletes the bucket if the fs is at the root // // Returns an error if it isn't empty -func (f *Fs) Rmdir(dir string) error { +func (f *Fs) Rmdir(ctx context.Context, dir string) error { f.bucketOKMu.Lock() defer f.bucketOKMu.Unlock() if f.root != "" || dir != "" { @@ -990,7 +992,7 @@ func (f *Fs) deleteByID(ID, Name string) error { // if oldOnly is true then it deletes only non current files. // // Implemented here so we can make sure we delete old versions. -func (f *Fs) purge(oldOnly bool) error { +func (f *Fs) purge(ctx context.Context, oldOnly bool) error { var errReturn error var checkErrMutex sync.Mutex var checkErr = func(err error) { @@ -1025,7 +1027,7 @@ func (f *Fs) purge(oldOnly bool) error { }() } last := "" - checkErr(f.list("", true, "", 0, true, func(remote string, object *api.File, isDirectory bool) error { + checkErr(f.list(ctx, "", true, "", 0, true, func(remote string, object *api.File, isDirectory bool) error { if !isDirectory { accounting.Stats.Checking(remote) if oldOnly && last != remote { @@ -1051,19 +1053,19 @@ func (f *Fs) purge(oldOnly bool) error { wg.Wait() if !oldOnly { - checkErr(f.Rmdir("")) + checkErr(f.Rmdir(ctx, "")) } return errReturn } // Purge deletes all the files and directories including the old versions. 
-func (f *Fs) Purge() error { - return f.purge(false) +func (f *Fs) Purge(ctx context.Context) error { + return f.purge(ctx, false) } // CleanUp deletes all the hidden files. -func (f *Fs) CleanUp() error { - return f.purge(true) +func (f *Fs) CleanUp(ctx context.Context) error { + return f.purge(ctx, true) } // Copy src to this remote using server side copy operations. @@ -1075,8 +1077,8 @@ func (f *Fs) CleanUp() error { // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy -func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { - err := f.Mkdir("") +func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { + err := f.Mkdir(ctx, "") if err != nil { return nil, err } @@ -1144,13 +1146,13 @@ func (o *Object) Remote() string { } // Hash returns the Sha-1 of an object returning a lowercase hex string -func (o *Object) Hash(t hash.Type) (string, error) { +func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { if t != hash.SHA1 { return "", hash.ErrUnsupported } if o.sha1 == "" { // Error is logged in readMetaData - err := o.readMetaData() + err := o.readMetaData(ctx) if err != nil { return "", err } @@ -1207,7 +1209,10 @@ func (o *Object) decodeMetaDataFileInfo(info *api.FileInfo) (err error) { } // getMetaData gets the metadata from the object unconditionally -func (o *Object) getMetaData() (info *api.File, err error) { +func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) { + if o.id != "" { + return nil, nil + } maxSearched := 1 var timestamp api.Timestamp baseRemote := o.remote @@ -1215,7 +1220,8 @@ func (o *Object) getMetaData() (info *api.File, err error) { timestamp, baseRemote = api.RemoveVersion(baseRemote) maxSearched = maxVersions } - err = o.fs.list("", true, baseRemote, maxSearched, o.fs.opt.Versions, func(remote string, object *api.File, isDirectory bool) error { + + err = o.fs.list(ctx, "", true, baseRemote, 
maxSearched, o.fs.opt.Versions, func(remote string, object *api.File, isDirectory bool) error { if isDirectory { return nil } @@ -1246,11 +1252,11 @@ func (o *Object) getMetaData() (info *api.File, err error) { // o.modTime // o.size // o.sha1 -func (o *Object) readMetaData() (err error) { +func (o *Object) readMetaData(ctx context.Context) (err error) { if o.id != "" { return nil } - info, err := o.getMetaData() + info, err := o.getMetaData(ctx) if err != nil { return err } @@ -1285,15 +1291,15 @@ func (o *Object) parseTimeString(timeString string) (err error) { // LastModified returned in the http headers // // SHA-1 will also be updated once the request has completed. -func (o *Object) ModTime() (result time.Time) { +func (o *Object) ModTime(ctx context.Context) (result time.Time) { // The error is logged in readMetaData - _ = o.readMetaData() + _ = o.readMetaData(ctx) return o.modTime } // SetModTime sets the modification time of the Object -func (o *Object) SetModTime(modTime time.Time) error { - info, err := o.getMetaData() +func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { + info, err := o.getMetaData(ctx) if err != nil { return err } @@ -1386,7 +1392,7 @@ func (file *openFile) Close() (err error) { var _ io.ReadCloser = &openFile{} // Open an object for read -func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { +func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { opts := rest.Opts{ Method: "GET", Options: options, @@ -1477,11 +1483,11 @@ func urlEncode(in string) string { // Update the object with the contents of the io.Reader, modTime and size // // The new object may have been created if an error is returned -func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { +func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { if o.fs.opt.Versions { return 
errNotWithVersions } - err = o.fs.Mkdir("") + err = o.fs.Mkdir(ctx, "") if err != nil { return err } @@ -1499,7 +1505,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio if err == nil { fs.Debugf(o, "File is big enough for chunked streaming") - up, err := o.fs.newLargeUpload(o, in, src) + up, err := o.fs.newLargeUpload(ctx, o, in, src) if err != nil { o.fs.putUploadBlock(buf) return err @@ -1514,16 +1520,16 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio return err } } else if size > int64(o.fs.opt.UploadCutoff) { - up, err := o.fs.newLargeUpload(o, in, src) + up, err := o.fs.newLargeUpload(ctx, o, in, src) if err != nil { return err } return up.Upload() } - modTime := src.ModTime() + modTime := src.ModTime(ctx) - calculatedSha1, _ := src.Hash(hash.SHA1) + calculatedSha1, _ := src.Hash(ctx, hash.SHA1) if calculatedSha1 == "" { calculatedSha1 = "hex_digits_at_end" har := newHashAppendingReader(in, sha1.New()) @@ -1601,7 +1607,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio ExtraHeaders: map[string]string{ "Authorization": upload.AuthorizationToken, "X-Bz-File-Name": urlEncode(o.fs.root + o.remote), - "Content-Type": fs.MimeType(src), + "Content-Type": fs.MimeType(ctx, src), sha1Header: calculatedSha1, timeHeader: timeString(modTime), }, @@ -1626,7 +1632,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio } // Remove an object -func (o *Object) Remove() error { +func (o *Object) Remove(ctx context.Context) error { if o.fs.opt.Versions { return errNotWithVersions } @@ -1637,7 +1643,7 @@ func (o *Object) Remove() error { } // MimeType of an Object if known, "" otherwise -func (o *Object) MimeType() string { +func (o *Object) MimeType(ctx context.Context) string { return o.mimeType } diff --git a/backend/b2/upload.go b/backend/b2/upload.go index 0ac60f2c7..a6cdca317 100644 --- a/backend/b2/upload.go +++ b/backend/b2/upload.go @@ 
-6,6 +6,7 @@ package b2 import ( "bytes" + "context" "crypto/sha1" "encoding/hex" "fmt" @@ -80,7 +81,7 @@ type largeUpload struct { } // newLargeUpload starts an upload of object o from in with metadata in src -func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) { +func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) { remote := o.remote size := src.Size() parts := int64(0) @@ -98,7 +99,7 @@ func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *lar sha1SliceSize = parts } - modTime := src.ModTime() + modTime := src.ModTime(ctx) opts := rest.Opts{ Method: "POST", Path: "/b2_start_large_file", @@ -110,14 +111,14 @@ func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *lar var request = api.StartLargeFileRequest{ BucketID: bucketID, Name: o.fs.root + remote, - ContentType: fs.MimeType(src), + ContentType: fs.MimeType(ctx, src), Info: map[string]string{ timeKey: timeString(modTime), }, } // Set the SHA1 if known if !o.fs.opt.DisableCheckSum { - if calculatedSha1, err := src.Hash(hash.SHA1); err == nil && calculatedSha1 != "" { + if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" { request.Info[sha1Key] = calculatedSha1 } } diff --git a/backend/box/box.go b/backend/box/box.go index f6f4a629c..b3f61bea2 100644 --- a/backend/box/box.go +++ b/backend/box/box.go @@ -10,6 +10,7 @@ package box // FIXME box can copy a directory import ( + "context" "fmt" "io" "log" @@ -193,9 +194,9 @@ func restoreReservedChars(x string) string { } // readMetaDataForPath reads the metadata from the path -func (f *Fs) readMetaDataForPath(path string) (info *api.Item, err error) { +func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) { // defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err) - leaf, directoryID, err := 
f.dirCache.FindRootAndPath(path, false) + leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, path, false) if err != nil { if err == fs.ErrorDirNotFound { return nil, fs.ErrorObjectNotFound @@ -238,6 +239,7 @@ func errorHandler(resp *http.Response) error { // NewFs constructs an Fs from the path, container:path func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { + ctx := context.Background() // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) @@ -271,7 +273,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { // Renew the token in the background f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error { - _, err := f.readMetaDataForPath("") + _, err := f.readMetaDataForPath(ctx, "") return err }) @@ -279,7 +281,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { f.dirCache = dircache.New(root, rootID, f) // Find the current root - err = f.dirCache.FindRoot(false) + err = f.dirCache.FindRoot(ctx, false) if err != nil { // Assume it is a file newRoot, remote := dircache.SplitPath(root) @@ -287,12 +289,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { tempF.dirCache = dircache.New(newRoot, rootID, &tempF) tempF.root = newRoot // Make new Fs which is the parent - err = tempF.dirCache.FindRoot(false) + err = tempF.dirCache.FindRoot(ctx, false) if err != nil { // No root so return old f return f, nil } - _, err := tempF.newObjectWithInfo(remote, nil) + _, err := tempF.newObjectWithInfo(ctx, remote, nil) if err != nil { if err == fs.ErrorObjectNotFound { // File doesn't exist so return old f @@ -323,7 +325,7 @@ func (f *Fs) rootSlash() string { // Return an Object from a path // // If it can't be found it returns the error fs.ErrorObjectNotFound. 
-func (f *Fs) newObjectWithInfo(remote string, info *api.Item) (fs.Object, error) { +func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Item) (fs.Object, error) { o := &Object{ fs: f, remote: remote, @@ -333,7 +335,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.Item) (fs.Object, error) // Set info err = o.setMetaData(info) } else { - err = o.readMetaData() // reads info and meta, returning an error + err = o.readMetaData(ctx) // reads info and meta, returning an error } if err != nil { return nil, err @@ -343,12 +345,12 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.Item) (fs.Object, error) // NewObject finds the Object at remote. If it can't be found // it returns the error fs.ErrorObjectNotFound. -func (f *Fs) NewObject(remote string) (fs.Object, error) { - return f.newObjectWithInfo(remote, nil) +func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { + return f.newObjectWithInfo(ctx, remote, nil) } // FindLeaf finds a directory of name leaf in the folder with ID pathID -func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) { +func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) { // Find the leaf in pathID found, err = f.listAll(pathID, true, false, func(item *api.Item) bool { if item.Name == leaf { @@ -368,7 +370,7 @@ func fieldsValue() url.Values { } // CreateDir makes a directory with pathID as parent and name leaf -func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) { +func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) { // fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf) var resp *http.Response var info *api.Item @@ -467,12 +469,12 @@ OUTER: // // This should return ErrDirNotFound if the directory isn't // found. 
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { - err = f.dirCache.FindRoot(false) +func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { + err = f.dirCache.FindRoot(ctx, false) if err != nil { return nil, err } - directoryID, err := f.dirCache.FindDir(dir, false) + directoryID, err := f.dirCache.FindDir(ctx, dir, false) if err != nil { return nil, err } @@ -486,7 +488,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { // FIXME more info from dir? entries = append(entries, d) } else if info.Type == api.ItemTypeFile { - o, err := f.newObjectWithInfo(remote, info) + o, err := f.newObjectWithInfo(ctx, remote, info) if err != nil { iErr = err return true @@ -510,9 +512,9 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { // Returns the object, leaf, directoryID and error // // Used to create new objects -func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) { +func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) { // Create the directory for the object if it doesn't exist - leaf, directoryID, err = f.dirCache.FindRootAndPath(remote, true) + leaf, directoryID, err = f.dirCache.FindRootAndPath(ctx, remote, true) if err != nil { return } @@ -529,22 +531,22 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje // Copy the reader in to the new object which is returned // // The new object may have been created if an error is returned -func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { - existingObj, err := f.newObjectWithInfo(src.Remote(), nil) +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + existingObj, err := f.newObjectWithInfo(ctx, src.Remote(), nil) switch err { 
case nil: - return existingObj, existingObj.Update(in, src, options...) + return existingObj, existingObj.Update(ctx, in, src, options...) case fs.ErrorObjectNotFound: // Not found so create it - return f.PutUnchecked(in, src) + return f.PutUnchecked(ctx, in, src) default: return nil, err } } // PutStream uploads to the remote path with the modTime given of indeterminate size -func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { - return f.Put(in, src, options...) +func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + return f.Put(ctx, in, src, options...) } // PutUnchecked the object into the container @@ -554,26 +556,26 @@ func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption // Copy the reader in to the new object which is returned // // The new object may have been created if an error is returned -func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { +func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { remote := src.Remote() size := src.Size() - modTime := src.ModTime() + modTime := src.ModTime(ctx) - o, _, _, err := f.createObject(remote, modTime, size) + o, _, _, err := f.createObject(ctx, remote, modTime, size) if err != nil { return nil, err } - return o, o.Update(in, src, options...) + return o, o.Update(ctx, in, src, options...) 
} // Mkdir creates the container if it doesn't exist -func (f *Fs) Mkdir(dir string) error { - err := f.dirCache.FindRoot(true) +func (f *Fs) Mkdir(ctx context.Context, dir string) error { + err := f.dirCache.FindRoot(ctx, true) if err != nil { return err } if dir != "" { - _, err = f.dirCache.FindDir(dir, true) + _, err = f.dirCache.FindDir(ctx, dir, true) } return err } @@ -593,17 +595,17 @@ func (f *Fs) deleteObject(id string) error { // purgeCheck removes the root directory, if check is set then it // refuses to do so if it has anything in -func (f *Fs) purgeCheck(dir string, check bool) error { +func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error { root := path.Join(f.root, dir) if root == "" { return errors.New("can't purge root directory") } dc := f.dirCache - err := dc.FindRoot(false) + err := dc.FindRoot(ctx, false) if err != nil { return err } - rootID, err := dc.FindDir(dir, false) + rootID, err := dc.FindDir(ctx, dir, false) if err != nil { return err } @@ -633,8 +635,8 @@ func (f *Fs) purgeCheck(dir string, check bool) error { // Rmdir deletes the root folder // // Returns an error if it isn't empty -func (f *Fs) Rmdir(dir string) error { - return f.purgeCheck(dir, true) +func (f *Fs) Rmdir(ctx context.Context, dir string) error { + return f.purgeCheck(ctx, dir, true) } // Precision return the precision of this Fs @@ -651,13 +653,13 @@ func (f *Fs) Precision() time.Duration { // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy -func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { +func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't copy - not same remote type") return nil, fs.ErrorCantCopy } - err := srcObj.readMetaData() + err := srcObj.readMetaData(ctx) if err != nil { return nil, err } @@ -669,7 +671,7 @@ func (f *Fs) Copy(src fs.Object, remote string) 
(fs.Object, error) { } // Create temporary object - dstObj, leaf, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size) + dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size) if err != nil { return nil, err } @@ -708,8 +710,8 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { // Optional interface: Only implement this if you have a way of // deleting all the files quicker than just running Remove() on the // result of List() -func (f *Fs) Purge() error { - return f.purgeCheck("", false) +func (f *Fs) Purge(ctx context.Context) error { + return f.purgeCheck(ctx, "", false) } // move a file or folder @@ -746,7 +748,7 @@ func (f *Fs) move(endpoint, id, leaf, directoryID string) (info *api.Item, err e // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantMove -func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { +func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't move - not same remote type") @@ -754,7 +756,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { } // Create temporary object - dstObj, leaf, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size) + dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size) if err != nil { return nil, err } @@ -780,7 +782,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { // If it isn't possible then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists -func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { +func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { srcFs, ok := src.(*Fs) if !ok { fs.Debugf(srcFs, "Can't move directory - not same remote type") @@ -796,14 +798,14 @@ func (f *Fs) DirMove(src fs.Fs, 
srcRemote, dstRemote string) error { } // find the root src directory - err := srcFs.dirCache.FindRoot(false) + err := srcFs.dirCache.FindRoot(ctx, false) if err != nil { return err } // find the root dst directory if dstRemote != "" { - err = f.dirCache.FindRoot(true) + err = f.dirCache.FindRoot(ctx, true) if err != nil { return err } @@ -819,14 +821,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { if dstRemote == "" { findPath = f.root } - leaf, directoryID, err = f.dirCache.FindPath(findPath, true) + leaf, directoryID, err = f.dirCache.FindPath(ctx, findPath, true) if err != nil { return err } // Check destination does not exist if dstRemote != "" { - _, err = f.dirCache.FindDir(dstRemote, false) + _, err = f.dirCache.FindDir(ctx, dstRemote, false) if err == fs.ErrorDirNotFound { // OK } else if err != nil { @@ -837,7 +839,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { } // Find ID of src - srcID, err := srcFs.dirCache.FindDir(srcRemote, false) + srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false) if err != nil { return err } @@ -852,8 +854,8 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { } // PublicLink adds a "readable by anyone with link" permission on the given file or folder. 
-func (f *Fs) PublicLink(remote string) (string, error) { - id, err := f.dirCache.FindDir(remote, false) +func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) { + id, err := f.dirCache.FindDir(ctx, remote, false) var opts rest.Opts if err == nil { fs.Debugf(f, "attempting to share directory '%s'", remote) @@ -865,7 +867,7 @@ func (f *Fs) PublicLink(remote string) (string, error) { } } else { fs.Debugf(f, "attempting to share single file '%s'", remote) - o, err := f.NewObject(remote) + o, err := f.NewObject(ctx, remote) if err != nil { return "", err } @@ -928,7 +930,7 @@ func (o *Object) srvPath() string { } // Hash returns the SHA-1 of an object returning a lowercase hex string -func (o *Object) Hash(t hash.Type) (string, error) { +func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { if t != hash.SHA1 { return "", hash.ErrUnsupported } @@ -937,7 +939,7 @@ func (o *Object) Hash(t hash.Type) (string, error) { // Size returns the size of an object in bytes func (o *Object) Size() int64 { - err := o.readMetaData() + err := o.readMetaData(context.TODO()) if err != nil { fs.Logf(o, "Failed to read metadata: %v", err) return 0 @@ -962,11 +964,11 @@ func (o *Object) setMetaData(info *api.Item) (err error) { // readMetaData gets the metadata if it hasn't already been fetched // // it also sets the info -func (o *Object) readMetaData() (err error) { +func (o *Object) readMetaData(ctx context.Context) (err error) { if o.hasMetaData { return nil } - info, err := o.fs.readMetaDataForPath(o.remote) + info, err := o.fs.readMetaDataForPath(ctx, o.remote) if err != nil { if apiErr, ok := err.(*api.Error); ok { if apiErr.Code == "not_found" || apiErr.Code == "trashed" { @@ -983,8 +985,8 @@ func (o *Object) readMetaData() (err error) { // // It attempts to read the objects mtime and if that isn't present the // LastModified returned in the http headers -func (o *Object) ModTime() time.Time { - err := o.readMetaData() +func (o *Object) 
ModTime(ctx context.Context) time.Time { + err := o.readMetaData(ctx) if err != nil { fs.Logf(o, "Failed to read metadata: %v", err) return time.Now() @@ -993,7 +995,7 @@ func (o *Object) ModTime() time.Time { } // setModTime sets the modification time of the local fs object -func (o *Object) setModTime(modTime time.Time) (*api.Item, error) { +func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item, error) { opts := rest.Opts{ Method: "PUT", Path: "/files/" + o.id, @@ -1011,8 +1013,8 @@ func (o *Object) setModTime(modTime time.Time) (*api.Item, error) { } // SetModTime sets the modification time of the local fs object -func (o *Object) SetModTime(modTime time.Time) error { - info, err := o.setModTime(modTime) +func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { + info, err := o.setModTime(ctx, modTime) if err != nil { return err } @@ -1025,7 +1027,7 @@ func (o *Object) Storable() bool { } // Open an object for read -func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { +func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { if o.id == "" { return nil, errors.New("can't download - no id") } @@ -1093,16 +1095,16 @@ func (o *Object) upload(in io.Reader, leaf, directoryID string, modTime time.Tim // If existing is set then it updates the object rather than creating a new one // // The new object may have been created if an error is returned -func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { +func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { o.fs.tokenRenewer.Start() defer o.fs.tokenRenewer.Stop() size := src.Size() - modTime := src.ModTime() + modTime := src.ModTime(ctx) remote := o.Remote() // Create the directory for the object if it doesn't exist - leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(remote, true) + leaf, 
directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, remote, true) if err != nil { return err } @@ -1117,7 +1119,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio } // Remove an object -func (o *Object) Remove() error { +func (o *Object) Remove(ctx context.Context) error { return o.fs.deleteObject(o.id) } diff --git a/backend/cache/cache.go b/backend/cache/cache.go index 4bd27deed..618532be4 100644 --- a/backend/cache/cache.go +++ b/backend/cache/cache.go @@ -509,7 +509,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) { if doChangeNotify := wrappedFs.Features().ChangeNotify; doChangeNotify != nil { pollInterval := make(chan time.Duration, 1) pollInterval <- time.Duration(f.opt.ChunkCleanInterval) - doChangeNotify(f.receiveChangeNotify, pollInterval) + doChangeNotify(context.Background(), f.receiveChangeNotify, pollInterval) } f.features = (&fs.Features{ @@ -600,7 +600,7 @@ is used on top of the cache. return f, fsErr } -func (f *Fs) httpStats(in rc.Params) (out rc.Params, err error) { +func (f *Fs) httpStats(ctx context.Context, in rc.Params) (out rc.Params, err error) { out = make(rc.Params) m, err := f.Stats() if err != nil { @@ -627,7 +627,7 @@ func (f *Fs) unwrapRemote(remote string) string { return remote } -func (f *Fs) httpExpireRemote(in rc.Params) (out rc.Params, err error) { +func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params, err error) { out = make(rc.Params) remoteInt, ok := in["remote"] if !ok { @@ -672,7 +672,7 @@ func (f *Fs) httpExpireRemote(in rc.Params) (out rc.Params, err error) { return out, nil } -func (f *Fs) rcFetch(in rc.Params) (rc.Params, error) { +func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) { type chunkRange struct { start, end int64 } @@ -777,18 +777,18 @@ func (f *Fs) rcFetch(in rc.Params) (rc.Params, error) { for _, pair := range files { file, remote := pair[0], pair[1] var status fileStatus - o, err := 
f.NewObject(remote) + o, err := f.NewObject(ctx, remote) if err != nil { fetchedChunks[file] = fileStatus{Error: err.Error()} continue } co := o.(*Object) - err = co.refreshFromSource(true) + err = co.refreshFromSource(ctx, true) if err != nil { fetchedChunks[file] = fileStatus{Error: err.Error()} continue } - handle := NewObjectHandle(co, f) + handle := NewObjectHandle(ctx, co, f) handle.UseMemory = false handle.scaleWorkers(1) walkChunkRanges(crs, co.Size(), func(chunk int64) { @@ -874,7 +874,7 @@ func (f *Fs) notifyChangeUpstream(remote string, entryType fs.EntryType) { // ChangeNotify can subscribe multiple callers // this is coupled with the wrapped fs ChangeNotify (if it supports it) // and also notifies other caches (i.e VFS) to clear out whenever something changes -func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) { +func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) { f.parentsForgetMu.Lock() defer f.parentsForgetMu.Unlock() fs.Debugf(f, "subscribing to ChangeNotify") @@ -921,7 +921,7 @@ func (f *Fs) TempUploadWaitTime() time.Duration { } // NewObject finds the Object at remote. 
-func (f *Fs) NewObject(remote string) (fs.Object, error) { +func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { var err error fs.Debugf(f, "new object '%s'", remote) @@ -940,16 +940,16 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) { // search for entry in source or temp fs var obj fs.Object if f.opt.TempWritePath != "" { - obj, err = f.tempFs.NewObject(remote) + obj, err = f.tempFs.NewObject(ctx, remote) // not found in temp fs if err != nil { fs.Debugf(remote, "find: not found in local cache fs") - obj, err = f.Fs.NewObject(remote) + obj, err = f.Fs.NewObject(ctx, remote) } else { fs.Debugf(obj, "find: found in local cache fs") } } else { - obj, err = f.Fs.NewObject(remote) + obj, err = f.Fs.NewObject(ctx, remote) } // not found in either fs @@ -959,13 +959,13 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) { } // cache the new entry - co = ObjectFromOriginal(f, obj).persist() + co = ObjectFromOriginal(ctx, f, obj).persist() fs.Debugf(co, "find: cached object") return co, nil } // List the objects and directories in dir into entries -func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { +func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { fs.Debugf(f, "list '%s'", dir) cd := ShallowDirectory(f, dir) @@ -995,12 +995,12 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { fs.Debugf(dir, "list: temp fs entries: %v", queuedEntries) for _, queuedRemote := range queuedEntries { - queuedEntry, err := f.tempFs.NewObject(f.cleanRootFromPath(queuedRemote)) + queuedEntry, err := f.tempFs.NewObject(ctx, f.cleanRootFromPath(queuedRemote)) if err != nil { fs.Debugf(dir, "list: temp file not found in local fs: %v", err) continue } - co := ObjectFromOriginal(f, queuedEntry).persist() + co := ObjectFromOriginal(ctx, f, queuedEntry).persist() fs.Debugf(co, "list: cached temp object") cachedEntries = append(cachedEntries, co) } @@ -1008,7 +1008,7 @@ func (f *Fs) 
List(dir string) (entries fs.DirEntries, err error) { } // search from the source - sourceEntries, err := f.Fs.List(dir) + sourceEntries, err := f.Fs.List(ctx, dir) if err != nil { return nil, err } @@ -1046,11 +1046,11 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { if i < tmpCnt && cachedEntries[i].Remote() == oRemote { continue } - co := ObjectFromOriginal(f, o).persist() + co := ObjectFromOriginal(ctx, f, o).persist() cachedEntries = append(cachedEntries, co) fs.Debugf(dir, "list: cached object: %v", co) case fs.Directory: - cdd := DirectoryFromOriginal(f, o) + cdd := DirectoryFromOriginal(ctx, f, o) // check if the dir isn't expired and add it in cache if it isn't if cdd2, err := f.cache.GetDir(cdd.abs()); err != nil || time.Now().Before(cdd2.CacheTs.Add(time.Duration(f.opt.InfoAge))) { batchDirectories = append(batchDirectories, cdd) @@ -1080,8 +1080,8 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { return cachedEntries, nil } -func (f *Fs) recurse(dir string, list *walk.ListRHelper) error { - entries, err := f.List(dir) +func (f *Fs) recurse(ctx context.Context, dir string, list *walk.ListRHelper) error { + entries, err := f.List(ctx, dir) if err != nil { return err } @@ -1089,7 +1089,7 @@ func (f *Fs) recurse(dir string, list *walk.ListRHelper) error { for i := 0; i < len(entries); i++ { innerDir, ok := entries[i].(fs.Directory) if ok { - err := f.recurse(innerDir.Remote(), list) + err := f.recurse(ctx, innerDir.Remote(), list) if err != nil { return err } @@ -1106,21 +1106,21 @@ func (f *Fs) recurse(dir string, list *walk.ListRHelper) error { // ListR lists the objects and directories of the Fs starting // from dir recursively into out. 
-func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) { +func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { fs.Debugf(f, "list recursively from '%s'", dir) // we check if the source FS supports ListR // if it does, we'll use that to get all the entries, cache them and return do := f.Fs.Features().ListR if do != nil { - return do(dir, func(entries fs.DirEntries) error { + return do(ctx, dir, func(entries fs.DirEntries) error { // we got called back with a set of entries so let's cache them and call the original callback for _, entry := range entries { switch o := entry.(type) { case fs.Object: - _ = f.cache.AddObject(ObjectFromOriginal(f, o)) + _ = f.cache.AddObject(ObjectFromOriginal(ctx, f, o)) case fs.Directory: - _ = f.cache.AddDir(DirectoryFromOriginal(f, o)) + _ = f.cache.AddDir(DirectoryFromOriginal(ctx, f, o)) default: return errors.Errorf("Unknown object type %T", entry) } @@ -1133,7 +1133,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) { // if we're here, we're gonna do a standard recursive traversal and cache everything list := walk.NewListRHelper(callback) - err = f.recurse(dir, list) + err = f.recurse(ctx, dir, list) if err != nil { return err } @@ -1142,9 +1142,9 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) { } // Mkdir makes the directory (container, bucket) -func (f *Fs) Mkdir(dir string) error { +func (f *Fs) Mkdir(ctx context.Context, dir string) error { fs.Debugf(f, "mkdir '%s'", dir) - err := f.Fs.Mkdir(dir) + err := f.Fs.Mkdir(ctx, dir) if err != nil { return err } @@ -1172,7 +1172,7 @@ func (f *Fs) Mkdir(dir string) error { } // Rmdir removes the directory (container, bucket) if empty -func (f *Fs) Rmdir(dir string) error { +func (f *Fs) Rmdir(ctx context.Context, dir string) error { fs.Debugf(f, "rmdir '%s'", dir) if f.opt.TempWritePath != "" { @@ -1182,9 +1182,9 @@ func (f *Fs) Rmdir(dir string) error { // we check if the source 
exists on the remote and make the same move on it too if it does // otherwise, we skip this step - _, err := f.UnWrap().List(dir) + _, err := f.UnWrap().List(ctx, dir) if err == nil { - err := f.Fs.Rmdir(dir) + err := f.Fs.Rmdir(ctx, dir) if err != nil { return err } @@ -1192,10 +1192,10 @@ func (f *Fs) Rmdir(dir string) error { } var queuedEntries []*Object - err = walk.ListR(f.tempFs, dir, true, -1, walk.ListObjects, func(entries fs.DirEntries) error { + err = walk.ListR(ctx, f.tempFs, dir, true, -1, walk.ListObjects, func(entries fs.DirEntries) error { for _, o := range entries { if oo, ok := o.(fs.Object); ok { - co := ObjectFromOriginal(f, oo) + co := ObjectFromOriginal(ctx, f, oo) queuedEntries = append(queuedEntries, co) } } @@ -1212,7 +1212,7 @@ func (f *Fs) Rmdir(dir string) error { } } } else { - err := f.Fs.Rmdir(dir) + err := f.Fs.Rmdir(ctx, dir) if err != nil { return err } @@ -1243,7 +1243,7 @@ func (f *Fs) Rmdir(dir string) error { // DirMove moves src, srcRemote to this remote at dstRemote // using server side move operations. 
-func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { +func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { fs.Debugf(f, "move dir '%s'/'%s' -> '%s'/'%s'", src.Root(), srcRemote, f.Root(), dstRemote) do := f.Fs.Features().DirMove @@ -1265,8 +1265,8 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { f.backgroundRunner.pause() defer f.backgroundRunner.play() - _, errInWrap := srcFs.UnWrap().List(srcRemote) - _, errInTemp := f.tempFs.List(srcRemote) + _, errInWrap := srcFs.UnWrap().List(ctx, srcRemote) + _, errInTemp := f.tempFs.List(ctx, srcRemote) // not found in either fs if errInWrap != nil && errInTemp != nil { return fs.ErrorDirNotFound @@ -1275,7 +1275,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { // we check if the source exists on the remote and make the same move on it too if it does // otherwise, we skip this step if errInWrap == nil { - err := do(srcFs.UnWrap(), srcRemote, dstRemote) + err := do(ctx, srcFs.UnWrap(), srcRemote, dstRemote) if err != nil { return err } @@ -1288,10 +1288,10 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { } var queuedEntries []*Object - err := walk.ListR(f.tempFs, srcRemote, true, -1, walk.ListObjects, func(entries fs.DirEntries) error { + err := walk.ListR(ctx, f.tempFs, srcRemote, true, -1, walk.ListObjects, func(entries fs.DirEntries) error { for _, o := range entries { if oo, ok := o.(fs.Object); ok { - co := ObjectFromOriginal(f, oo) + co := ObjectFromOriginal(ctx, f, oo) queuedEntries = append(queuedEntries, co) if co.tempFileStartedUpload() { fs.Errorf(co, "can't move - upload has already started. 
need to finish that") @@ -1312,16 +1312,16 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { fs.Errorf(srcRemote, "dirmove: can't move dir in temp fs") return fs.ErrorCantDirMove } - err = do(f.tempFs, srcRemote, dstRemote) + err = do(ctx, f.tempFs, srcRemote, dstRemote) if err != nil { return err } - err = f.cache.ReconcileTempUploads(f) + err = f.cache.ReconcileTempUploads(ctx, f) if err != nil { return err } } else { - err := do(srcFs.UnWrap(), srcRemote, dstRemote) + err := do(ctx, srcFs.UnWrap(), srcRemote, dstRemote) if err != nil { return err } @@ -1427,10 +1427,10 @@ func (f *Fs) cacheReader(u io.Reader, src fs.ObjectInfo, originalRead func(inn i } } -type putFn func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) +type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) // put in to the remote path -func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) { +func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) { var err error var obj fs.Object @@ -1441,7 +1441,7 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p _ = f.cache.ExpireDir(parentCd) f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory) - obj, err = f.tempFs.Put(in, src, options...) + obj, err = f.tempFs.Put(ctx, in, src, options...) if err != nil { fs.Errorf(obj, "put: failed to upload in temp fs: %v", err) return nil, err @@ -1456,14 +1456,14 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p // if cache writes is enabled write it first through cache } else if f.opt.StoreWrites { f.cacheReader(in, src, func(inn io.Reader) { - obj, err = put(inn, src, options...) + obj, err = put(ctx, inn, src, options...) 
}) if err == nil { fs.Debugf(obj, "put: uploaded to remote fs and saved in cache") } // last option: save it directly in remote fs } else { - obj, err = put(in, src, options...) + obj, err = put(ctx, in, src, options...) if err == nil { fs.Debugf(obj, "put: uploaded to remote fs") } @@ -1475,7 +1475,7 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p } // cache the new file - cachedObj := ObjectFromOriginal(f, obj) + cachedObj := ObjectFromOriginal(ctx, f, obj) // deleting cached chunks and info to be replaced with new ones _ = f.cache.RemoveObject(cachedObj.abs()) @@ -1498,33 +1498,33 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p } // Put in to the remote path with the modTime given of the given size -func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { fs.Debugf(f, "put data at '%s'", src.Remote()) - return f.put(in, src, options, f.Fs.Put) + return f.put(ctx, in, src, options, f.Fs.Put) } // PutUnchecked uploads the object -func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { +func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { do := f.Fs.Features().PutUnchecked if do == nil { return nil, errors.New("can't PutUnchecked") } fs.Debugf(f, "put data unchecked in '%s'", src.Remote()) - return f.put(in, src, options, do) + return f.put(ctx, in, src, options, do) } // PutStream uploads the object -func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { +func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { do := f.Fs.Features().PutStream if do == nil { return nil, errors.New("can't PutStream") } 
fs.Debugf(f, "put data streaming in '%s'", src.Remote()) - return f.put(in, src, options, do) + return f.put(ctx, in, src, options, do) } // Copy src to this remote using server side copy operations. -func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { +func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { fs.Debugf(f, "copy obj '%s' -> '%s'", src, remote) do := f.Fs.Features().Copy @@ -1544,7 +1544,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { return nil, fs.ErrorCantCopy } // refresh from source or abort - if err := srcObj.refreshFromSource(false); err != nil { + if err := srcObj.refreshFromSource(ctx, false); err != nil { fs.Errorf(f, "can't copy %v - %v", src, err) return nil, fs.ErrorCantCopy } @@ -1563,7 +1563,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { } } - obj, err := do(srcObj.Object, remote) + obj, err := do(ctx, srcObj.Object, remote) if err != nil { fs.Errorf(srcObj, "error moving in cache: %v", err) return nil, err @@ -1571,7 +1571,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { fs.Debugf(obj, "copy: file copied") // persist new - co := ObjectFromOriginal(f, obj).persist() + co := ObjectFromOriginal(ctx, f, obj).persist() fs.Debugf(co, "copy: added to cache") // expire the destination path parentCd := NewDirectory(f, cleanPath(path.Dir(co.Remote()))) @@ -1598,7 +1598,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { } // Move src to this remote using server side move operations. 
-func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { +func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { fs.Debugf(f, "moving obj '%s' -> %s", src, remote) // if source fs doesn't support move abort @@ -1619,7 +1619,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { return nil, fs.ErrorCantMove } // refresh from source or abort - if err := srcObj.refreshFromSource(false); err != nil { + if err := srcObj.refreshFromSource(ctx, false); err != nil { fs.Errorf(f, "can't move %v - %v", src, err) return nil, fs.ErrorCantMove } @@ -1655,7 +1655,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { fs.Debugf(srcObj, "move: queued file moved to %v", remote) } - obj, err := do(srcObj.Object, remote) + obj, err := do(ctx, srcObj.Object, remote) if err != nil { fs.Errorf(srcObj, "error moving: %v", err) return nil, err @@ -1680,7 +1680,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { // advertise to ChangeNotify if wrapped doesn't do that f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory) // persist new - cachedObj := ObjectFromOriginal(f, obj).persist() + cachedObj := ObjectFromOriginal(ctx, f, obj).persist() fs.Debugf(cachedObj, "move: added to cache") // expire new parent parentCd = NewDirectory(f, cleanPath(path.Dir(cachedObj.Remote()))) @@ -1702,7 +1702,7 @@ func (f *Fs) Hashes() hash.Set { } // Purge all files in the root and the root directory -func (f *Fs) Purge() error { +func (f *Fs) Purge(ctx context.Context) error { fs.Infof(f, "purging cache") f.cache.Purge() @@ -1711,7 +1711,7 @@ func (f *Fs) Purge() error { return nil } - err := do() + err := do(ctx) if err != nil { return err } @@ -1720,7 +1720,7 @@ func (f *Fs) Purge() error { } // CleanUp the trash in the Fs -func (f *Fs) CleanUp() error { +func (f *Fs) CleanUp(ctx context.Context) error { f.CleanUpCache(false) do := f.Fs.Features().CleanUp @@ -1728,16 +1728,16 @@ func 
(f *Fs) CleanUp() error { return nil } - return do() + return do(ctx) } // About gets quota information from the Fs -func (f *Fs) About() (*fs.Usage, error) { +func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { do := f.Fs.Features().About if do == nil { return nil, errors.New("About not supported") } - return do() + return do(ctx) } // Stats returns stats about the cache storage diff --git a/backend/cache/cache_internal_test.go b/backend/cache/cache_internal_test.go index aacd5684b..5e573d96b 100644 --- a/backend/cache/cache_internal_test.go +++ b/backend/cache/cache_internal_test.go @@ -4,6 +4,7 @@ package cache_test import ( "bytes" + "context" "encoding/base64" goflag "flag" "fmt" @@ -120,7 +121,7 @@ func TestInternalListRootAndInnerRemotes(t *testing.T) { require.NoError(t, err) listRootInner, err := runInstance.list(t, rootFs, innerFolder) require.NoError(t, err) - listInner, err := rootFs2.List("") + listInner, err := rootFs2.List(context.Background(), "") require.NoError(t, err) require.Len(t, listRoot, 1) @@ -138,10 +139,10 @@ func TestInternalVfsCache(t *testing.T) { rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"writes": "true", "info_age": "1h"}) defer runInstance.cleanupFs(t, rootFs, boltDb) - err := rootFs.Mkdir("test") + err := rootFs.Mkdir(context.Background(), "test") require.NoError(t, err) runInstance.writeObjectString(t, rootFs, "test/second", "content") - _, err = rootFs.List("test") + _, err = rootFs.List(context.Background(), "test") require.NoError(t, err) testReader := runInstance.randomReader(t, testSize) @@ -266,7 +267,7 @@ func TestInternalObjNotFound(t *testing.T) { rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil) defer runInstance.cleanupFs(t, rootFs, boltDb) - obj, err := rootFs.NewObject("404") + obj, err := rootFs.NewObject(context.Background(), "404") require.Error(t, err) require.Nil(t, obj) } @@ -445,7 +446,7 @@ func 
TestInternalWrappedFsChangeNotSeen(t *testing.T) { require.NoError(t, err) log.Printf("original size: %v", originalSize) - o, err := cfs.UnWrap().NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin")) + o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin")) require.NoError(t, err) expectedSize := int64(len([]byte("test content"))) var data2 []byte @@ -457,7 +458,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) { data2 = []byte("test content") } objInfo := object.NewStaticObjectInfo(runInstance.encryptRemoteIfNeeded(t, "data.bin"), time.Now(), int64(len(data2)), true, nil, cfs.UnWrap()) - err = o.Update(bytes.NewReader(data2), objInfo) + err = o.Update(context.Background(), bytes.NewReader(data2), objInfo) require.NoError(t, err) require.Equal(t, int64(len(data2)), o.Size()) log.Printf("updated size: %v", len(data2)) @@ -503,9 +504,9 @@ func TestInternalMoveWithNotify(t *testing.T) { } else { testData = []byte("test content") } - _ = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test")) - _ = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test/one")) - _ = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test/second")) + _ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test")) + _ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test/one")) + _ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test/second")) srcObj := runInstance.writeObjectBytes(t, cfs.UnWrap(), srcName, testData) // list in mount @@ -515,7 +516,7 @@ func TestInternalMoveWithNotify(t *testing.T) { require.NoError(t, err) // move file - _, err = cfs.UnWrap().Features().Move(srcObj, dstName) + _, err = cfs.UnWrap().Features().Move(context.Background(), srcObj, dstName) require.NoError(t, err) err = runInstance.retryBlock(func() error { @@ -589,9 +590,9 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) { } 
else { testData = []byte("test content") } - err = rootFs.Mkdir("test") + err = rootFs.Mkdir(context.Background(), "test") require.NoError(t, err) - err = rootFs.Mkdir("test/one") + err = rootFs.Mkdir(context.Background(), "test/one") require.NoError(t, err) srcObj := runInstance.writeObjectBytes(t, cfs.UnWrap(), srcName, testData) @@ -608,7 +609,7 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) { require.False(t, found) // move file - _, err = cfs.UnWrap().Features().Move(srcObj, dstName) + _, err = cfs.UnWrap().Features().Move(context.Background(), srcObj, dstName) require.NoError(t, err) err = runInstance.retryBlock(func() error { @@ -670,23 +671,23 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) { runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData) // update in the wrapped fs - o, err := cfs.UnWrap().NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin")) + o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin")) require.NoError(t, err) wrappedTime := time.Now().Add(-1 * time.Hour) - err = o.SetModTime(wrappedTime) + err = o.SetModTime(context.Background(), wrappedTime) require.NoError(t, err) // get a new instance from the cache - co, err := rootFs.NewObject("data.bin") + co, err := rootFs.NewObject(context.Background(), "data.bin") require.NoError(t, err) - require.NotEqual(t, o.ModTime().String(), co.ModTime().String()) + require.NotEqual(t, o.ModTime(context.Background()).String(), co.ModTime(context.Background()).String()) cfs.DirCacheFlush() // flush the cache // get a new instance from the cache - co, err = rootFs.NewObject("data.bin") + co, err = rootFs.NewObject(context.Background(), "data.bin") require.NoError(t, err) - require.Equal(t, wrappedTime.Unix(), co.ModTime().Unix()) + require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix()) } func TestInternalChangeSeenAfterRc(t *testing.T) { @@ -713,19 +714,19 @@ func TestInternalChangeSeenAfterRc(t 
*testing.T) { runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData) // update in the wrapped fs - o, err := cfs.UnWrap().NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin")) + o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin")) require.NoError(t, err) wrappedTime := time.Now().Add(-1 * time.Hour) - err = o.SetModTime(wrappedTime) + err = o.SetModTime(context.Background(), wrappedTime) require.NoError(t, err) // get a new instance from the cache - co, err := rootFs.NewObject("data.bin") + co, err := rootFs.NewObject(context.Background(), "data.bin") require.NoError(t, err) - require.NotEqual(t, o.ModTime().String(), co.ModTime().String()) + require.NotEqual(t, o.ModTime(context.Background()).String(), co.ModTime(context.Background()).String()) // Call the rc function - m, err := cacheExpire.Fn(rc.Params{"remote": "data.bin"}) + m, err := cacheExpire.Fn(context.Background(), rc.Params{"remote": "data.bin"}) require.NoError(t, err) require.Contains(t, m, "status") require.Contains(t, m, "message") @@ -733,9 +734,9 @@ func TestInternalChangeSeenAfterRc(t *testing.T) { require.Contains(t, m["message"], "cached file cleared") // get a new instance from the cache - co, err = rootFs.NewObject("data.bin") + co, err = rootFs.NewObject(context.Background(), "data.bin") require.NoError(t, err) - require.Equal(t, wrappedTime.Unix(), co.ModTime().Unix()) + require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix()) _, err = runInstance.list(t, rootFs, "") require.NoError(t, err) @@ -749,7 +750,7 @@ func TestInternalChangeSeenAfterRc(t *testing.T) { require.Len(t, li1, 1) // Call the rc function - m, err = cacheExpire.Fn(rc.Params{"remote": "/"}) + m, err = cacheExpire.Fn(context.Background(), rc.Params{"remote": "/"}) require.NoError(t, err) require.Contains(t, m, "status") require.Contains(t, m, "message") @@ -794,7 +795,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) { // create 
some rand test data testData := randStringBytes(int(int64(totalChunks-1)*chunkSize + chunkSize/2)) runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData) - o, err := cfs.NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin")) + o, err := cfs.NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin")) require.NoError(t, err) co, ok := o.(*cache.Object) require.True(t, ok) @@ -833,7 +834,7 @@ func TestInternalExpiredEntriesRemoved(t *testing.T) { require.NoError(t, err) require.Len(t, l, 1) - err = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test/third")) + err = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test/third")) require.NoError(t, err) l, err = runInstance.list(t, rootFs, "test") @@ -868,14 +869,14 @@ func TestInternalBug2117(t *testing.T) { cfs, err := runInstance.getCacheFs(rootFs) require.NoError(t, err) - err = cfs.UnWrap().Mkdir("test") + err = cfs.UnWrap().Mkdir(context.Background(), "test") require.NoError(t, err) for i := 1; i <= 4; i++ { - err = cfs.UnWrap().Mkdir(fmt.Sprintf("test/dir%d", i)) + err = cfs.UnWrap().Mkdir(context.Background(), fmt.Sprintf("test/dir%d", i)) require.NoError(t, err) for j := 1; j <= 4; j++ { - err = cfs.UnWrap().Mkdir(fmt.Sprintf("test/dir%d/dir%d", i, j)) + err = cfs.UnWrap().Mkdir(context.Background(), fmt.Sprintf("test/dir%d/dir%d", i, j)) require.NoError(t, err) runInstance.writeObjectString(t, cfs.UnWrap(), fmt.Sprintf("test/dir%d/dir%d/test.txt", i, j), "test") @@ -1080,10 +1081,10 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool } if purge { - _ = f.Features().Purge() + _ = f.Features().Purge(context.Background()) require.NoError(t, err) } - err = f.Mkdir("") + err = f.Mkdir(context.Background(), "") require.NoError(t, err) if r.useMount && !r.isMounted { r.mountFs(t, f) @@ -1097,7 +1098,7 @@ func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) { r.unmountFs(t, f) } - err := 
f.Features().Purge() + err := f.Features().Purge(context.Background()) require.NoError(t, err) cfs, err := r.getCacheFs(f) require.NoError(t, err) @@ -1199,7 +1200,7 @@ func (r *run) writeRemoteReader(t *testing.T, f fs.Fs, remote string, in io.Read func (r *run) writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte) fs.Object { in := bytes.NewReader(data) _ = r.writeObjectReader(t, f, remote, in) - o, err := f.NewObject(remote) + o, err := f.NewObject(context.Background(), remote) require.NoError(t, err) require.Equal(t, int64(len(data)), o.Size()) return o @@ -1208,7 +1209,7 @@ func (r *run) writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte func (r *run) writeObjectReader(t *testing.T, f fs.Fs, remote string, in io.Reader) fs.Object { modTime := time.Now() objInfo := object.NewStaticObjectInfo(remote, modTime, -1, true, nil, f) - obj, err := f.Put(in, objInfo) + obj, err := f.Put(context.Background(), in, objInfo) require.NoError(t, err) if r.useMount { r.vfs.WaitForWriters(10 * time.Second) @@ -1228,18 +1229,18 @@ func (r *run) updateObjectRemote(t *testing.T, f fs.Fs, remote string, data1 []b err = ioutil.WriteFile(path.Join(r.mntDir, remote), data2, 0600) require.NoError(t, err) r.vfs.WaitForWriters(10 * time.Second) - obj, err = f.NewObject(remote) + obj, err = f.NewObject(context.Background(), remote) } else { in1 := bytes.NewReader(data1) in2 := bytes.NewReader(data2) objInfo1 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f) objInfo2 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f) - obj, err = f.Put(in1, objInfo1) + obj, err = f.Put(context.Background(), in1, objInfo1) require.NoError(t, err) - obj, err = f.NewObject(remote) + obj, err = f.NewObject(context.Background(), remote) require.NoError(t, err) - err = obj.Update(in2, objInfo2) + err = obj.Update(context.Background(), in2, objInfo2) } require.NoError(t, err) @@ -1268,7 +1269,7 @@ func (r *run) 
readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e return checkSample, err } } else { - co, err := f.NewObject(remote) + co, err := f.NewObject(context.Background(), remote) if err != nil { return checkSample, err } @@ -1283,7 +1284,7 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e func (r *run) readDataFromObj(t *testing.T, o fs.Object, offset, end int64, noLengthCheck bool) []byte { size := end - offset checkSample := make([]byte, size) - reader, err := o.Open(&fs.SeekOption{Offset: offset}) + reader, err := o.Open(context.Background(), &fs.SeekOption{Offset: offset}) require.NoError(t, err) totalRead, err := io.ReadFull(reader, checkSample) if (err == io.EOF || err == io.ErrUnexpectedEOF) && noLengthCheck { @@ -1300,7 +1301,7 @@ func (r *run) mkdir(t *testing.T, f fs.Fs, remote string) { if r.useMount { err = os.Mkdir(path.Join(r.mntDir, remote), 0700) } else { - err = f.Mkdir(remote) + err = f.Mkdir(context.Background(), remote) } require.NoError(t, err) } @@ -1312,11 +1313,11 @@ func (r *run) rm(t *testing.T, f fs.Fs, remote string) error { err = os.Remove(path.Join(r.mntDir, remote)) } else { var obj fs.Object - obj, err = f.NewObject(remote) + obj, err = f.NewObject(context.Background(), remote) if err != nil { - err = f.Rmdir(remote) + err = f.Rmdir(context.Background(), remote) } else { - err = obj.Remove() + err = obj.Remove(context.Background()) } } @@ -1334,7 +1335,7 @@ func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error) } } else { var list fs.DirEntries - list, err = f.List(remote) + list, err = f.List(context.Background(), remote) for _, ll := range list { l = append(l, ll) } @@ -1353,7 +1354,7 @@ func (r *run) listPath(t *testing.T, f fs.Fs, remote string) []string { } } else { var list fs.DirEntries - list, err = f.List(remote) + list, err = f.List(context.Background(), remote) for _, ll := range list { l = append(l, ll.Remote()) } @@ -1393,7 +1394,7 @@ func (r *run) 
dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error { } r.vfs.WaitForWriters(10 * time.Second) } else if rootFs.Features().DirMove != nil { - err = rootFs.Features().DirMove(rootFs, src, dst) + err = rootFs.Features().DirMove(context.Background(), rootFs, src, dst) if err != nil { return err } @@ -1415,11 +1416,11 @@ func (r *run) move(t *testing.T, rootFs fs.Fs, src, dst string) error { } r.vfs.WaitForWriters(10 * time.Second) } else if rootFs.Features().Move != nil { - obj1, err := rootFs.NewObject(src) + obj1, err := rootFs.NewObject(context.Background(), src) if err != nil { return err } - _, err = rootFs.Features().Move(obj1, dst) + _, err = rootFs.Features().Move(context.Background(), obj1, dst) if err != nil { return err } @@ -1441,11 +1442,11 @@ func (r *run) copy(t *testing.T, rootFs fs.Fs, src, dst string) error { } r.vfs.WaitForWriters(10 * time.Second) } else if rootFs.Features().Copy != nil { - obj, err := rootFs.NewObject(src) + obj, err := rootFs.NewObject(context.Background(), src) if err != nil { return err } - _, err = rootFs.Features().Copy(obj, dst) + _, err = rootFs.Features().Copy(context.Background(), obj, dst) if err != nil { return err } @@ -1467,11 +1468,11 @@ func (r *run) modTime(t *testing.T, rootFs fs.Fs, src string) (time.Time, error) } return fi.ModTime(), nil } - obj1, err := rootFs.NewObject(src) + obj1, err := rootFs.NewObject(context.Background(), src) if err != nil { return time.Time{}, err } - return obj1.ModTime(), nil + return obj1.ModTime(context.Background()), nil } func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) { @@ -1484,7 +1485,7 @@ func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) { } return fi.Size(), nil } - obj1, err := rootFs.NewObject(src) + obj1, err := rootFs.NewObject(context.Background(), src) if err != nil { return int64(0), err } @@ -1507,14 +1508,14 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e _, err = 
f.WriteString(data + append) } else { var obj1 fs.Object - obj1, err = rootFs.NewObject(src) + obj1, err = rootFs.NewObject(context.Background(), src) if err != nil { return err } data1 := []byte(data + append) r := bytes.NewReader(data1) objInfo1 := object.NewStaticObjectInfo(src, time.Now(), int64(len(data1)), true, nil, rootFs) - err = obj1.Update(r, objInfo1) + err = obj1.Update(context.Background(), r, objInfo1) } return err diff --git a/backend/cache/cache_upload_test.go b/backend/cache/cache_upload_test.go index 7c63ef33a..47751ef0e 100644 --- a/backend/cache/cache_upload_test.go +++ b/backend/cache/cache_upload_test.go @@ -3,6 +3,7 @@ package cache_test import ( + "context" "fmt" "math/rand" "os" @@ -85,11 +86,11 @@ func TestInternalUploadMoveExistingFile(t *testing.T) { map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"}) defer runInstance.cleanupFs(t, rootFs, boltDb) - err := rootFs.Mkdir("one") + err := rootFs.Mkdir(context.Background(), "one") require.NoError(t, err) - err = rootFs.Mkdir("one/test") + err = rootFs.Mkdir(context.Background(), "one/test") require.NoError(t, err) - err = rootFs.Mkdir("second") + err = rootFs.Mkdir(context.Background(), "second") require.NoError(t, err) // create some rand test data @@ -122,11 +123,11 @@ func TestInternalUploadTempPathCleaned(t *testing.T) { map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"}) defer runInstance.cleanupFs(t, rootFs, boltDb) - err := rootFs.Mkdir("one") + err := rootFs.Mkdir(context.Background(), "one") require.NoError(t, err) - err = rootFs.Mkdir("one/test") + err = rootFs.Mkdir(context.Background(), "one/test") require.NoError(t, err) - err = rootFs.Mkdir("second") + err = rootFs.Mkdir(context.Background(), "second") require.NoError(t, err) // create some rand test data @@ -165,7 +166,7 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) { map[string]string{"tmp_upload_path": 
path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"}) defer runInstance.cleanupFs(t, rootFs, boltDb) - err := rootFs.Mkdir("test") + err := rootFs.Mkdir(context.Background(), "test") require.NoError(t, err) minSize := 5242880 maxSize := 10485760 @@ -233,9 +234,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) { err = runInstance.dirMove(t, rootFs, "test", "second") if err != errNotSupported { require.NoError(t, err) - _, err = rootFs.NewObject("test/one") + _, err = rootFs.NewObject(context.Background(), "test/one") require.Error(t, err) - _, err = rootFs.NewObject("second/one") + _, err = rootFs.NewObject(context.Background(), "second/one") require.NoError(t, err) // validate that it exists in temp fs _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one"))) @@ -256,7 +257,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) { err = runInstance.rm(t, rootFs, "test") require.Error(t, err) require.Contains(t, err.Error(), "directory not empty") - _, err = rootFs.NewObject("test/one") + _, err = rootFs.NewObject(context.Background(), "test/one") require.NoError(t, err) // validate that it exists in temp fs _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one"))) @@ -270,9 +271,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) { if err != errNotSupported { require.NoError(t, err) // try to read from it - _, err = rootFs.NewObject("test/one") + _, err = rootFs.NewObject(context.Background(), "test/one") require.Error(t, err) - _, err = rootFs.NewObject("test/second") + _, err = rootFs.NewObject(context.Background(), "test/second") require.NoError(t, err) data2, err := runInstance.readDataFromRemote(t, rootFs, "test/second", 0, int64(len([]byte("one content"))), false) require.NoError(t, err) @@ -289,9 +290,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) { err = runInstance.copy(t, rootFs, path.Join("test", "one"), 
path.Join("test", "third")) if err != errNotSupported { require.NoError(t, err) - _, err = rootFs.NewObject("test/one") + _, err = rootFs.NewObject(context.Background(), "test/one") require.NoError(t, err) - _, err = rootFs.NewObject("test/third") + _, err = rootFs.NewObject(context.Background(), "test/third") require.NoError(t, err) data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false) require.NoError(t, err) @@ -306,7 +307,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) { // test Remove -- allowed err = runInstance.rm(t, rootFs, "test/one") require.NoError(t, err) - _, err = rootFs.NewObject("test/one") + _, err = rootFs.NewObject(context.Background(), "test/one") require.Error(t, err) // validate that it doesn't exist in temp fs _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one"))) @@ -318,7 +319,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) { require.NoError(t, err) err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated") require.NoError(t, err) - obj2, err := rootFs.NewObject("test/one") + obj2, err := rootFs.NewObject(context.Background(), "test/one") require.NoError(t, err) data2 := runInstance.readDataFromObj(t, obj2, 0, int64(len("one content updated")), false) require.Equal(t, "one content updated", string(data2)) @@ -366,7 +367,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) { err = runInstance.dirMove(t, rootFs, "test", "second") if err != errNotSupported { require.Error(t, err) - _, err = rootFs.NewObject("test/one") + _, err = rootFs.NewObject(context.Background(), "test/one") require.NoError(t, err) // validate that it exists in temp fs _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one"))) @@ -378,7 +379,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) { // test Rmdir err = runInstance.rm(t, rootFs, 
"test") require.Error(t, err) - _, err = rootFs.NewObject("test/one") + _, err = rootFs.NewObject(context.Background(), "test/one") require.NoError(t, err) // validate that it doesn't exist in temp fs _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one"))) @@ -389,9 +390,9 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) { if err != errNotSupported { require.Error(t, err) // try to read from it - _, err = rootFs.NewObject("test/one") + _, err = rootFs.NewObject(context.Background(), "test/one") require.NoError(t, err) - _, err = rootFs.NewObject("test/second") + _, err = rootFs.NewObject(context.Background(), "test/second") require.Error(t, err) // validate that it exists in temp fs _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one"))) @@ -404,9 +405,9 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) { err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third")) if err != errNotSupported { require.NoError(t, err) - _, err = rootFs.NewObject("test/one") + _, err = rootFs.NewObject(context.Background(), "test/one") require.NoError(t, err) - _, err = rootFs.NewObject("test/third") + _, err = rootFs.NewObject(context.Background(), "test/third") require.NoError(t, err) data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false) require.NoError(t, err) @@ -421,7 +422,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) { // test Remove err = runInstance.rm(t, rootFs, "test/one") require.Error(t, err) - _, err = rootFs.NewObject("test/one") + _, err = rootFs.NewObject(context.Background(), "test/one") require.NoError(t, err) // validate that it doesn't exist in temp fs _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one"))) diff --git a/backend/cache/directory.go b/backend/cache/directory.go 
index 877313d58..66ca15c80 100644 --- a/backend/cache/directory.go +++ b/backend/cache/directory.go @@ -3,6 +3,7 @@ package cache import ( + "context" "path" "time" @@ -55,7 +56,7 @@ func ShallowDirectory(f *Fs, remote string) *Directory { } // DirectoryFromOriginal builds one from a generic fs.Directory -func DirectoryFromOriginal(f *Fs, d fs.Directory) *Directory { +func DirectoryFromOriginal(ctx context.Context, f *Fs, d fs.Directory) *Directory { var cd *Directory fullRemote := path.Join(f.Root(), d.Remote()) @@ -67,7 +68,7 @@ func DirectoryFromOriginal(f *Fs, d fs.Directory) *Directory { CacheFs: f, Name: name, Dir: dir, - CacheModTime: d.ModTime().UnixNano(), + CacheModTime: d.ModTime(ctx).UnixNano(), CacheSize: d.Size(), CacheItems: d.Items(), CacheType: "Directory", @@ -110,7 +111,7 @@ func (d *Directory) parentRemote() string { } // ModTime returns the cached ModTime -func (d *Directory) ModTime() time.Time { +func (d *Directory) ModTime(ctx context.Context) time.Time { return time.Unix(0, d.CacheModTime) } diff --git a/backend/cache/handle.go b/backend/cache/handle.go index 6c0833eb6..6391513d9 100644 --- a/backend/cache/handle.go +++ b/backend/cache/handle.go @@ -3,6 +3,7 @@ package cache import ( + "context" "fmt" "io" "path" @@ -40,6 +41,7 @@ func initBackgroundUploader(fs *Fs) (*backgroundWriter, error) { // Handle is managing the read/write/seek operations on an open handle type Handle struct { + ctx context.Context cachedObject *Object cfs *Fs memory *Memory @@ -58,8 +60,9 @@ type Handle struct { } // NewObjectHandle returns a new Handle for an existing Object -func NewObjectHandle(o *Object, cfs *Fs) *Handle { +func NewObjectHandle(ctx context.Context, o *Object, cfs *Fs) *Handle { r := &Handle{ + ctx: ctx, cachedObject: o, cfs: cfs, offset: 0, @@ -351,7 +354,7 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error r := w.rc if w.rc == nil { r, err = w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) { - return 
w.r.cachedObject.Object.Open(&fs.RangeOption{Start: offset, End: end - 1}) + return w.r.cachedObject.Object.Open(w.r.ctx, &fs.RangeOption{Start: offset, End: end - 1}) }) if err != nil { return nil, err @@ -361,7 +364,7 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error if !closeOpen { if do, ok := r.(fs.RangeSeeker); ok { - _, err = do.RangeSeek(offset, io.SeekStart, end-offset) + _, err = do.RangeSeek(w.r.ctx, offset, io.SeekStart, end-offset) return r, err } else if do, ok := r.(io.Seeker); ok { _, err = do.Seek(offset, io.SeekStart) @@ -371,7 +374,7 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error _ = w.rc.Close() return w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) { - r, err = w.r.cachedObject.Object.Open(&fs.RangeOption{Start: offset, End: end - 1}) + r, err = w.r.cachedObject.Object.Open(w.r.ctx, &fs.RangeOption{Start: offset, End: end - 1}) if err != nil { return nil, err } @@ -449,7 +452,7 @@ func (w *worker) download(chunkStart, chunkEnd int64, retry int) { // we seem to be getting only errors so we abort if err != nil { fs.Errorf(w, "object open failed %v: %v", chunkStart, err) - err = w.r.cachedObject.refreshFromSource(true) + err = w.r.cachedObject.refreshFromSource(w.r.ctx, true) if err != nil { fs.Errorf(w, "%v", err) } @@ -462,7 +465,7 @@ func (w *worker) download(chunkStart, chunkEnd int64, retry int) { sourceRead, err = io.ReadFull(w.rc, data) if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { fs.Errorf(w, "failed to read chunk %v: %v", chunkStart, err) - err = w.r.cachedObject.refreshFromSource(true) + err = w.r.cachedObject.refreshFromSource(w.r.ctx, true) if err != nil { fs.Errorf(w, "%v", err) } @@ -588,7 +591,7 @@ func (b *backgroundWriter) run() { remote := b.fs.cleanRootFromPath(absPath) b.notify(remote, BackgroundUploadStarted, nil) fs.Infof(remote, "background upload: started upload") - err = operations.MoveFile(b.fs.UnWrap(), b.fs.tempFs, 
remote, remote) + err = operations.MoveFile(context.TODO(), b.fs.UnWrap(), b.fs.tempFs, remote, remote) if err != nil { b.notify(remote, BackgroundUploadError, err) _ = b.fs.cache.rollbackPendingUpload(absPath) @@ -598,14 +601,14 @@ func (b *backgroundWriter) run() { // clean empty dirs up to root thisDir := cleanPath(path.Dir(remote)) for thisDir != "" { - thisList, err := b.fs.tempFs.List(thisDir) + thisList, err := b.fs.tempFs.List(context.TODO(), thisDir) if err != nil { break } if len(thisList) > 0 { break } - err = b.fs.tempFs.Rmdir(thisDir) + err = b.fs.tempFs.Rmdir(context.TODO(), thisDir) fs.Debugf(thisDir, "cleaned from temp path") if err != nil { break diff --git a/backend/cache/object.go b/backend/cache/object.go index d468981c5..fe37b783d 100644 --- a/backend/cache/object.go +++ b/backend/cache/object.go @@ -3,6 +3,7 @@ package cache import ( + "context" "io" "path" "sync" @@ -68,7 +69,7 @@ func NewObject(f *Fs, remote string) *Object { } // ObjectFromOriginal builds one from a generic fs.Object -func ObjectFromOriginal(f *Fs, o fs.Object) *Object { +func ObjectFromOriginal(ctx context.Context, f *Fs, o fs.Object) *Object { var co *Object fullRemote := cleanPath(path.Join(f.Root(), o.Remote())) dir, name := path.Split(fullRemote) @@ -92,13 +93,13 @@ func ObjectFromOriginal(f *Fs, o fs.Object) *Object { CacheType: cacheType, CacheTs: time.Now(), } - co.updateData(o) + co.updateData(ctx, o) return co } -func (o *Object) updateData(source fs.Object) { +func (o *Object) updateData(ctx context.Context, source fs.Object) { o.Object = source - o.CacheModTime = source.ModTime().UnixNano() + o.CacheModTime = source.ModTime(ctx).UnixNano() o.CacheSize = source.Size() o.CacheStorable = source.Storable() o.CacheTs = time.Now() @@ -130,20 +131,20 @@ func (o *Object) abs() string { } // ModTime returns the cached ModTime -func (o *Object) ModTime() time.Time { - _ = o.refresh() +func (o *Object) ModTime(ctx context.Context) time.Time { + _ = o.refresh(ctx) return 
time.Unix(0, o.CacheModTime) } // Size returns the cached Size func (o *Object) Size() int64 { - _ = o.refresh() + _ = o.refresh(context.TODO()) return o.CacheSize } // Storable returns the cached Storable func (o *Object) Storable() bool { - _ = o.refresh() + _ = o.refresh(context.TODO()) return o.CacheStorable } @@ -151,18 +152,18 @@ func (o *Object) Storable() bool { // all these conditions must be true to ignore a refresh // 1. cache ts didn't expire yet // 2. is not pending a notification from the wrapped fs -func (o *Object) refresh() error { +func (o *Object) refresh(ctx context.Context) error { isNotified := o.CacheFs.isNotifiedRemote(o.Remote()) isExpired := time.Now().After(o.CacheTs.Add(time.Duration(o.CacheFs.opt.InfoAge))) if !isExpired && !isNotified { return nil } - return o.refreshFromSource(true) + return o.refreshFromSource(ctx, true) } // refreshFromSource requests the original FS for the object in case it comes from a cached entry -func (o *Object) refreshFromSource(force bool) error { +func (o *Object) refreshFromSource(ctx context.Context, force bool) error { o.refreshMutex.Lock() defer o.refreshMutex.Unlock() var err error @@ -172,29 +173,29 @@ func (o *Object) refreshFromSource(force bool) error { return nil } if o.isTempFile() { - liveObject, err = o.ParentFs.NewObject(o.Remote()) + liveObject, err = o.ParentFs.NewObject(ctx, o.Remote()) err = errors.Wrapf(err, "in parent fs %v", o.ParentFs) } else { - liveObject, err = o.CacheFs.Fs.NewObject(o.Remote()) + liveObject, err = o.CacheFs.Fs.NewObject(ctx, o.Remote()) err = errors.Wrapf(err, "in cache fs %v", o.CacheFs.Fs) } if err != nil { fs.Errorf(o, "error refreshing object in : %v", err) return err } - o.updateData(liveObject) + o.updateData(ctx, liveObject) o.persist() return nil } // SetModTime sets the ModTime of this object -func (o *Object) SetModTime(t time.Time) error { - if err := o.refreshFromSource(false); err != nil { +func (o *Object) SetModTime(ctx context.Context, t time.Time) 
error { + if err := o.refreshFromSource(ctx, false); err != nil { return err } - err := o.Object.SetModTime(t) + err := o.Object.SetModTime(ctx, t) if err != nil { return err } @@ -207,19 +208,19 @@ func (o *Object) SetModTime(t time.Time) error { } // Open is used to request a specific part of the file using fs.RangeOption -func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) { +func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) { var err error if o.Object == nil { - err = o.refreshFromSource(true) + err = o.refreshFromSource(ctx, true) } else { - err = o.refresh() + err = o.refresh(ctx) } if err != nil { return nil, err } - cacheReader := NewObjectHandle(o, o.CacheFs) + cacheReader := NewObjectHandle(ctx, o, o.CacheFs) var offset, limit int64 = 0, -1 for _, option := range options { switch x := option.(type) { @@ -238,8 +239,8 @@ func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) { } // Update will change the object data -func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { - if err := o.refreshFromSource(false); err != nil { +func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { + if err := o.refreshFromSource(ctx, false); err != nil { return err } // pause background uploads if active @@ -254,7 +255,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio fs.Debugf(o, "updating object contents with size %v", src.Size()) // FIXME use reliable upload - err := o.Object.Update(in, src, options...) + err := o.Object.Update(ctx, in, src, options...) 
if err != nil { fs.Errorf(o, "error updating source: %v", err) return err @@ -265,7 +266,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio // advertise to ChangeNotify if wrapped doesn't do that o.CacheFs.notifyChangeUpstreamIfNeeded(o.Remote(), fs.EntryObject) - o.CacheModTime = src.ModTime().UnixNano() + o.CacheModTime = src.ModTime(ctx).UnixNano() o.CacheSize = src.Size() o.CacheHashes = make(map[hash.Type]string) o.CacheTs = time.Now() @@ -275,8 +276,8 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio } // Remove deletes the object from both the cache and the source -func (o *Object) Remove() error { - if err := o.refreshFromSource(false); err != nil { +func (o *Object) Remove(ctx context.Context) error { + if err := o.refreshFromSource(ctx, false); err != nil { return err } // pause background uploads if active @@ -288,7 +289,7 @@ func (o *Object) Remove() error { return errors.Errorf("%v is currently uploading, can't delete", o) } } - err := o.Object.Remove() + err := o.Object.Remove(ctx) if err != nil { return err } @@ -306,8 +307,8 @@ func (o *Object) Remove() error { // Hash requests a hash of the object and stores in the cache // since it might or might not be called, this is lazy loaded -func (o *Object) Hash(ht hash.Type) (string, error) { - _ = o.refresh() +func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) { + _ = o.refresh(ctx) if o.CacheHashes == nil { o.CacheHashes = make(map[hash.Type]string) } @@ -316,10 +317,10 @@ func (o *Object) Hash(ht hash.Type) (string, error) { if found { return cachedHash, nil } - if err := o.refreshFromSource(false); err != nil { + if err := o.refreshFromSource(ctx, false); err != nil { return "", err } - liveHash, err := o.Object.Hash(ht) + liveHash, err := o.Object.Hash(ctx, ht) if err != nil { return "", err } diff --git a/backend/cache/storage_persistent.go b/backend/cache/storage_persistent.go index f0d78d6c7..9601cc20f 
100644 --- a/backend/cache/storage_persistent.go +++ b/backend/cache/storage_persistent.go @@ -4,6 +4,7 @@ package cache import ( "bytes" + "context" "encoding/binary" "encoding/json" "fmt" @@ -1014,7 +1015,7 @@ func (b *Persistent) SetPendingUploadToStarted(remote string) error { } // ReconcileTempUploads will recursively look for all the files in the temp directory and add them to the queue -func (b *Persistent) ReconcileTempUploads(cacheFs *Fs) error { +func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) error { return b.db.Update(func(tx *bolt.Tx) error { _ = tx.DeleteBucket([]byte(tempBucket)) bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket)) @@ -1023,7 +1024,7 @@ func (b *Persistent) ReconcileTempUploads(cacheFs *Fs) error { } var queuedEntries []fs.Object - err = walk.ListR(cacheFs.tempFs, "", true, -1, walk.ListObjects, func(entries fs.DirEntries) error { + err = walk.ListR(ctx, cacheFs.tempFs, "", true, -1, walk.ListObjects, func(entries fs.DirEntries) error { for _, o := range entries { if oo, ok := o.(fs.Object); ok { queuedEntries = append(queuedEntries, oo) diff --git a/backend/crypt/cipher.go b/backend/crypt/cipher.go index c3dd87768..b07ee551d 100644 --- a/backend/crypt/cipher.go +++ b/backend/crypt/cipher.go @@ -2,6 +2,7 @@ package crypt import ( "bytes" + "context" "crypto/aes" gocipher "crypto/cipher" "crypto/rand" @@ -68,7 +69,7 @@ type ReadSeekCloser interface { } // OpenRangeSeek opens the file handle at the offset with the limit given -type OpenRangeSeek func(offset, limit int64) (io.ReadCloser, error) +type OpenRangeSeek func(ctx context.Context, offset, limit int64) (io.ReadCloser, error) // Cipher is used to swap out the encryption implementations type Cipher interface { @@ -85,7 +86,7 @@ type Cipher interface { // DecryptData DecryptData(io.ReadCloser) (io.ReadCloser, error) // DecryptDataSeek decrypt at a given position - DecryptDataSeek(open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) + 
DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) // EncryptedSize calculates the size of the data when encrypted EncryptedSize(int64) int64 // DecryptedSize calculates the size of the data when decrypted @@ -755,22 +756,22 @@ func (c *cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) { } // newDecrypterSeek creates a new file handle decrypting on the fly -func (c *cipher) newDecrypterSeek(open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) { +func (c *cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) { var rc io.ReadCloser doRangeSeek := false setLimit := false // Open initially with no seek if offset == 0 && limit < 0 { // If no offset or limit then open whole file - rc, err = open(0, -1) + rc, err = open(ctx, 0, -1) } else if offset == 0 { // If no offset open the header + limit worth of the file _, underlyingLimit, _, _ := calculateUnderlying(offset, limit) - rc, err = open(0, int64(fileHeaderSize)+underlyingLimit) + rc, err = open(ctx, 0, int64(fileHeaderSize)+underlyingLimit) setLimit = true } else { // Otherwise just read the header to start with - rc, err = open(0, int64(fileHeaderSize)) + rc, err = open(ctx, 0, int64(fileHeaderSize)) doRangeSeek = true } if err != nil { @@ -783,7 +784,7 @@ func (c *cipher) newDecrypterSeek(open OpenRangeSeek, offset, limit int64) (fh * } fh.open = open // will be called by fh.RangeSeek if doRangeSeek { - _, err = fh.RangeSeek(offset, io.SeekStart, limit) + _, err = fh.RangeSeek(ctx, offset, io.SeekStart, limit) if err != nil { _ = fh.Close() return nil, err @@ -903,7 +904,7 @@ func calculateUnderlying(offset, limit int64) (underlyingOffset, underlyingLimit // limiting the total length to limit. // // RangeSeek with a limit of < 0 is equivalent to a regular Seek. 
-func (fh *decrypter) RangeSeek(offset int64, whence int, limit int64) (int64, error) { +func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, limit int64) (int64, error) { fh.mu.Lock() defer fh.mu.Unlock() @@ -930,7 +931,7 @@ func (fh *decrypter) RangeSeek(offset int64, whence int, limit int64) (int64, er // Can we seek underlying stream directly? if do, ok := fh.rc.(fs.RangeSeeker); ok { // Seek underlying stream directly - _, err := do.RangeSeek(underlyingOffset, 0, underlyingLimit) + _, err := do.RangeSeek(ctx, underlyingOffset, 0, underlyingLimit) if err != nil { return 0, fh.finish(err) } @@ -940,7 +941,7 @@ func (fh *decrypter) RangeSeek(offset int64, whence int, limit int64) (int64, er fh.rc = nil // Re-open the underlying object with the offset given - rc, err := fh.open(underlyingOffset, underlyingLimit) + rc, err := fh.open(ctx, underlyingOffset, underlyingLimit) if err != nil { return 0, fh.finish(errors.Wrap(err, "couldn't reopen file with offset and limit")) } @@ -969,7 +970,7 @@ func (fh *decrypter) RangeSeek(offset int64, whence int, limit int64) (int64, er // Seek implements the io.Seeker interface func (fh *decrypter) Seek(offset int64, whence int) (int64, error) { - return fh.RangeSeek(offset, whence, -1) + return fh.RangeSeek(context.TODO(), offset, whence, -1) } // finish sets the final error and tidies up @@ -1043,8 +1044,8 @@ func (c *cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) { // The open function must return a ReadCloser opened to the offset supplied // // You must use this form of DecryptData if you might want to Seek the file handle -func (c *cipher) DecryptDataSeek(open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) { - out, err := c.newDecrypterSeek(open, offset, limit) +func (c *cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) { + out, err := c.newDecrypterSeek(ctx, open, offset, limit) if err != nil { return nil, err } 
diff --git a/backend/crypt/cipher_test.go b/backend/crypt/cipher_test.go index dc20c97a7..b7e680d4e 100644 --- a/backend/crypt/cipher_test.go +++ b/backend/crypt/cipher_test.go @@ -2,6 +2,7 @@ package crypt import ( "bytes" + "context" "encoding/base32" "fmt" "io" @@ -965,7 +966,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) { // Open stream with a seek of underlyingOffset var reader io.ReadCloser - open := func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) { + open := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) { end := len(ciphertext) if underlyingLimit >= 0 { end = int(underlyingOffset + underlyingLimit) @@ -1006,7 +1007,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) { if offset+limit > len(plaintext) { continue } - rc, err := c.DecryptDataSeek(open, int64(offset), int64(limit)) + rc, err := c.DecryptDataSeek(context.Background(), open, int64(offset), int64(limit)) assert.NoError(t, err) check(rc, offset, limit) @@ -1014,14 +1015,14 @@ func TestNewDecrypterSeekLimit(t *testing.T) { } // Try decoding it with a single open and lots of seeks - fh, err := c.DecryptDataSeek(open, 0, -1) + fh, err := c.DecryptDataSeek(context.Background(), open, 0, -1) assert.NoError(t, err) for _, offset := range trials { for _, limit := range limits { if offset+limit > len(plaintext) { continue } - _, err := fh.RangeSeek(int64(offset), io.SeekStart, int64(limit)) + _, err := fh.RangeSeek(context.Background(), int64(offset), io.SeekStart, int64(limit)) assert.NoError(t, err) check(fh, offset, limit) @@ -1072,7 +1073,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) { } { what := fmt.Sprintf("offset = %d, limit = %d", test.offset, test.limit) callCount := 0 - testOpen := func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) { + testOpen := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) { switch callCount { case 0: assert.Equal(t, int64(0), underlyingOffset, 
what) @@ -1084,11 +1085,11 @@ func TestNewDecrypterSeekLimit(t *testing.T) { t.Errorf("Too many calls %d for %s", callCount+1, what) } callCount++ - return open(underlyingOffset, underlyingLimit) + return open(ctx, underlyingOffset, underlyingLimit) } - fh, err := c.DecryptDataSeek(testOpen, 0, -1) + fh, err := c.DecryptDataSeek(context.Background(), testOpen, 0, -1) assert.NoError(t, err) - gotOffset, err := fh.RangeSeek(test.offset, io.SeekStart, test.limit) + gotOffset, err := fh.RangeSeek(context.Background(), test.offset, io.SeekStart, test.limit) assert.NoError(t, err) assert.Equal(t, gotOffset, test.offset) } diff --git a/backend/crypt/crypt.go b/backend/crypt/crypt.go index 18025f101..6fa14e00e 100644 --- a/backend/crypt/crypt.go +++ b/backend/crypt/crypt.go @@ -2,6 +2,7 @@ package crypt import ( + "context" "fmt" "io" "strings" @@ -232,7 +233,7 @@ func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) { } // Encrypt an directory file name to entries. -func (f *Fs) addDir(entries *fs.DirEntries, dir fs.Directory) { +func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) { remote := dir.Remote() decryptedRemote, err := f.cipher.DecryptDirName(remote) if err != nil { @@ -242,18 +243,18 @@ func (f *Fs) addDir(entries *fs.DirEntries, dir fs.Directory) { if f.opt.ShowMapping { fs.Logf(decryptedRemote, "Encrypts to %q", remote) } - *entries = append(*entries, f.newDir(dir)) + *entries = append(*entries, f.newDir(ctx, dir)) } // Encrypt some directory entries. This alters entries returning it as newEntries. 
-func (f *Fs) encryptEntries(entries fs.DirEntries) (newEntries fs.DirEntries, err error) { +func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntries fs.DirEntries, err error) { newEntries = entries[:0] // in place filter for _, entry := range entries { switch x := entry.(type) { case fs.Object: f.add(&newEntries, x) case fs.Directory: - f.addDir(&newEntries, x) + f.addDir(ctx, &newEntries, x) default: return nil, errors.Errorf("Unknown object type %T", entry) } @@ -270,12 +271,12 @@ func (f *Fs) encryptEntries(entries fs.DirEntries) (newEntries fs.DirEntries, er // // This should return ErrDirNotFound if the directory isn't // found. -func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { - entries, err = f.Fs.List(f.cipher.EncryptDirName(dir)) +func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { + entries, err = f.Fs.List(ctx, f.cipher.EncryptDirName(dir)) if err != nil { return nil, err } - return f.encryptEntries(entries) + return f.encryptEntries(ctx, entries) } // ListR lists the objects and directories of the Fs starting @@ -294,9 +295,9 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { // // Don't implement this unless you have a more efficient way // of listing recursively that doing a directory traversal. -func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) { - return f.Fs.Features().ListR(f.cipher.EncryptDirName(dir), func(entries fs.DirEntries) error { - newEntries, err := f.encryptEntries(entries) +func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { + return f.Fs.Features().ListR(ctx, f.cipher.EncryptDirName(dir), func(entries fs.DirEntries) error { + newEntries, err := f.encryptEntries(ctx, entries) if err != nil { return err } @@ -305,18 +306,18 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) { } // NewObject finds the Object at remote. 
-func (f *Fs) NewObject(remote string) (fs.Object, error) { - o, err := f.Fs.NewObject(f.cipher.EncryptFileName(remote)) +func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { + o, err := f.Fs.NewObject(ctx, f.cipher.EncryptFileName(remote)) if err != nil { return nil, err } return f.newObject(o), nil } -type putFn func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) +type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) // put implements Put or PutStream -func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) { +func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) { // Encrypt the data into wrappedIn wrappedIn, err := f.cipher.EncryptData(in) if err != nil { @@ -342,7 +343,7 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p } // Transfer the data - o, err := put(wrappedIn, f.newObjectInfo(src), options...) + o, err := put(ctx, wrappedIn, f.newObjectInfo(src), options...) 
if err != nil { return nil, err } @@ -351,13 +352,13 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p if ht != hash.None && hasher != nil { srcHash := hasher.Sums()[ht] var dstHash string - dstHash, err = o.Hash(ht) + dstHash, err = o.Hash(ctx, ht) if err != nil { return nil, errors.Wrap(err, "failed to read destination hash") } if srcHash != "" && dstHash != "" && srcHash != dstHash { // remove object - err = o.Remove() + err = o.Remove(ctx) if err != nil { fs.Errorf(o, "Failed to remove corrupted object: %v", err) } @@ -373,13 +374,13 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p // May create the object even if it returns an error - if so // will return the object and the error, otherwise will return // nil and the error -func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { - return f.put(in, src, options, f.Fs.Put) +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + return f.put(ctx, in, src, options, f.Fs.Put) } // PutStream uploads to the remote path with the modTime given of indeterminate size -func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { - return f.put(in, src, options, f.Fs.Features().PutStream) +func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + return f.put(ctx, in, src, options, f.Fs.Features().PutStream) } // Hashes returns the supported hash sets. 
@@ -390,15 +391,15 @@ func (f *Fs) Hashes() hash.Set { // Mkdir makes the directory (container, bucket) // // Shouldn't return an error if it already exists -func (f *Fs) Mkdir(dir string) error { - return f.Fs.Mkdir(f.cipher.EncryptDirName(dir)) +func (f *Fs) Mkdir(ctx context.Context, dir string) error { + return f.Fs.Mkdir(ctx, f.cipher.EncryptDirName(dir)) } // Rmdir removes the directory (container, bucket) if empty // // Return an error if it doesn't exist or isn't empty -func (f *Fs) Rmdir(dir string) error { - return f.Fs.Rmdir(f.cipher.EncryptDirName(dir)) +func (f *Fs) Rmdir(ctx context.Context, dir string) error { + return f.Fs.Rmdir(ctx, f.cipher.EncryptDirName(dir)) } // Purge all files in the root and the root directory @@ -407,12 +408,12 @@ func (f *Fs) Rmdir(dir string) error { // quicker than just running Remove() on the result of List() // // Return an error if it doesn't exist -func (f *Fs) Purge() error { +func (f *Fs) Purge(ctx context.Context) error { do := f.Fs.Features().Purge if do == nil { return fs.ErrorCantPurge } - return do() + return do(ctx) } // Copy src to this remote using server side copy operations. 
@@ -424,7 +425,7 @@ func (f *Fs) Purge() error { // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy -func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { +func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { do := f.Fs.Features().Copy if do == nil { return nil, fs.ErrorCantCopy @@ -433,7 +434,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { if !ok { return nil, fs.ErrorCantCopy } - oResult, err := do(o.Object, f.cipher.EncryptFileName(remote)) + oResult, err := do(ctx, o.Object, f.cipher.EncryptFileName(remote)) if err != nil { return nil, err } @@ -449,7 +450,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantMove -func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { +func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { do := f.Fs.Features().Move if do == nil { return nil, fs.ErrorCantMove @@ -458,7 +459,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { if !ok { return nil, fs.ErrorCantMove } - oResult, err := do(o.Object, f.cipher.EncryptFileName(remote)) + oResult, err := do(ctx, o.Object, f.cipher.EncryptFileName(remote)) if err != nil { return nil, err } @@ -473,7 +474,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { // If it isn't possible then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists -func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { +func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { do := f.Fs.Features().DirMove if do == nil { return fs.ErrorCantDirMove @@ -483,14 +484,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { fs.Debugf(srcFs, "Can't move directory - not same 
remote type") return fs.ErrorCantDirMove } - return do(srcFs.Fs, f.cipher.EncryptDirName(srcRemote), f.cipher.EncryptDirName(dstRemote)) + return do(ctx, srcFs.Fs, f.cipher.EncryptDirName(srcRemote), f.cipher.EncryptDirName(dstRemote)) } // PutUnchecked uploads the object // // This will create a duplicate if we upload a new file without // checking to see if there is one already - use Put() for that. -func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { +func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { do := f.Fs.Features().PutUnchecked if do == nil { return nil, errors.New("can't PutUnchecked") @@ -499,7 +500,7 @@ func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOpt if err != nil { return nil, err } - o, err := do(wrappedIn, f.newObjectInfo(src)) + o, err := do(ctx, wrappedIn, f.newObjectInfo(src)) if err != nil { return nil, err } @@ -510,21 +511,21 @@ func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOpt // // Implement this if you have a way of emptying the trash or // otherwise cleaning up old versions of files. -func (f *Fs) CleanUp() error { +func (f *Fs) CleanUp(ctx context.Context) error { do := f.Fs.Features().CleanUp if do == nil { return errors.New("can't CleanUp") } - return do() + return do(ctx) } // About gets quota information from the Fs -func (f *Fs) About() (*fs.Usage, error) { +func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { do := f.Fs.Features().About if do == nil { return nil, errors.New("About not supported") } - return do() + return do(ctx) } // UnWrap returns the Fs that this Fs is wrapping @@ -556,10 +557,10 @@ func (f *Fs) DecryptFileName(encryptedFileName string) (string, error) { // src with it, and calculates the hash given by HashType on the fly // // Note that we break lots of encapsulation in this function. 
-func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) { +func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) { // Read the nonce - opening the file is sufficient to read the nonce in // use a limited read so we only read the header - in, err := o.Object.Open(&fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1}) + in, err := o.Object.Open(ctx, &fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1}) if err != nil { return "", errors.Wrap(err, "failed to open object to read nonce") } @@ -589,7 +590,7 @@ func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType hash.Type) (hashStr } // Open the src for input - in, err = src.Open() + in, err = src.Open(ctx) if err != nil { return "", errors.Wrap(err, "failed to open src") } @@ -616,16 +617,16 @@ func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType hash.Type) (hashStr // MergeDirs merges the contents of all the directories passed // in into the first one and rmdirs the other directories. 
-func (f *Fs) MergeDirs(dirs []fs.Directory) error { +func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error { do := f.Fs.Features().MergeDirs if do == nil { return errors.New("MergeDirs not supported") } out := make([]fs.Directory, len(dirs)) for i, dir := range dirs { - out[i] = fs.NewDirCopy(dir).SetRemote(f.cipher.EncryptDirName(dir.Remote())) + out[i] = fs.NewDirCopy(ctx, dir).SetRemote(f.cipher.EncryptDirName(dir.Remote())) } - return do(out) + return do(ctx, out) } // DirCacheFlush resets the directory cache - used in testing @@ -638,23 +639,23 @@ func (f *Fs) DirCacheFlush() { } // PublicLink generates a public link to the remote path (usually readable by anyone) -func (f *Fs) PublicLink(remote string) (string, error) { +func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) { do := f.Fs.Features().PublicLink if do == nil { return "", errors.New("PublicLink not supported") } - o, err := f.NewObject(remote) + o, err := f.NewObject(ctx, remote) if err != nil { // assume it is a directory - return do(f.cipher.EncryptDirName(remote)) + return do(ctx, f.cipher.EncryptDirName(remote)) } - return do(o.(*Object).Object.Remote()) + return do(ctx, o.(*Object).Object.Remote()) } // ChangeNotify calls the passed function with a path // that has had changes. If the implementation // uses polling, it should adhere to the given interval. 
-func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) { +func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) { do := f.Fs.Features().ChangeNotify if do == nil { return @@ -680,7 +681,7 @@ func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollIntervalCha } notifyFunc(decrypted, entryType) } - do(wrappedNotifyFunc, pollIntervalChan) + do(ctx, wrappedNotifyFunc, pollIntervalChan) } // Object describes a wrapped for being read from the Fs @@ -733,7 +734,7 @@ func (o *Object) Size() int64 { // Hash returns the selected checksum of the file // If no checksum is available it returns "" -func (o *Object) Hash(ht hash.Type) (string, error) { +func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) { return "", hash.ErrUnsupported } @@ -743,7 +744,7 @@ func (o *Object) UnWrap() fs.Object { } // Open opens the file for read. Call Close() on the returned io.ReadCloser -func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) { +func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) { var openOptions []fs.OpenOption var offset, limit int64 = 0, -1 for _, option := range options { @@ -757,10 +758,10 @@ func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) { openOptions = append(openOptions, option) } } - rc, err = o.f.cipher.DecryptDataSeek(func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) { + rc, err = o.f.cipher.DecryptDataSeek(ctx, func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) { if underlyingOffset == 0 && underlyingLimit < 0 { // Open with no seek - return o.Object.Open(openOptions...) + return o.Object.Open(ctx, openOptions...) 
} // Open stream with a range of underlyingOffset, underlyingLimit end := int64(-1) @@ -771,7 +772,7 @@ func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) { } } newOpenOptions := append(openOptions, &fs.RangeOption{Start: underlyingOffset, End: end}) - return o.Object.Open(newOpenOptions...) + return o.Object.Open(ctx, newOpenOptions...) }, offset, limit) if err != nil { return nil, err @@ -780,17 +781,17 @@ func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) { } // Update in to the object with the modTime given of the given size -func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { - update := func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { - return o.Object, o.Object.Update(in, src, options...) +func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { + update := func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + return o.Object, o.Object.Update(ctx, in, src, options...) 
} - _, err := o.f.put(in, src, options, update) + _, err := o.f.put(ctx, in, src, options, update) return err } // newDir returns a dir with the Name decrypted -func (f *Fs) newDir(dir fs.Directory) fs.Directory { - newDir := fs.NewDirCopy(dir) +func (f *Fs) newDir(ctx context.Context, dir fs.Directory) fs.Directory { + newDir := fs.NewDirCopy(ctx, dir) remote := dir.Remote() decryptedRemote, err := f.cipher.DecryptDirName(remote) if err != nil { @@ -837,7 +838,7 @@ func (o *ObjectInfo) Size() int64 { // Hash returns the selected checksum of the file // If no checksum is available it returns "" -func (o *ObjectInfo) Hash(hash hash.Type) (string, error) { +func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) { return "", nil } diff --git a/backend/drive/drive.go b/backend/drive/drive.go index 943ec46a5..c9ac6ecdd 100644 --- a/backend/drive/drive.go +++ b/backend/drive/drive.go @@ -9,6 +9,7 @@ package drive import ( "bytes" + "context" "fmt" "io" "io/ioutil" @@ -565,7 +566,7 @@ func containsString(slice []string, s string) bool { // If the user fn ever returns true then it early exits with found = true // // Search params: https://developers.google.com/drive/search-parameters -func (f *Fs) list(dirIDs []string, title string, directoriesOnly, filesOnly, includeAll bool, fn listFn) (found bool, err error) { +func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directoriesOnly, filesOnly, includeAll bool, fn listFn) (found bool, err error) { var query []string if !includeAll { q := "trashed=" + strconv.FormatBool(f.opt.TrashedOnly) @@ -910,6 +911,7 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) { // NewFs constructs an Fs from the path, container:path func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) { + ctx := context.Background() // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) @@ -996,7 +998,7 @@ func NewFs(name, path string, m 
configmap.Mapper) (fs.Fs, error) { } // Find the current root - err = f.dirCache.FindRoot(false) + err = f.dirCache.FindRoot(ctx, false) if err != nil { // Assume it is a file newRoot, remote := dircache.SplitPath(root) @@ -1004,12 +1006,12 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) { tempF.dirCache = dircache.New(newRoot, f.rootFolderID, &tempF) tempF.root = newRoot // Make new Fs which is the parent - err = tempF.dirCache.FindRoot(false) + err = tempF.dirCache.FindRoot(ctx, false) if err != nil { // No root so return old f return f, nil } - _, err := tempF.NewObject(remote) + _, err := tempF.NewObject(ctx, remote) if err != nil { // unable to list folder so return old f return f, nil @@ -1164,8 +1166,8 @@ func (f *Fs) newObjectWithExportInfo( // NewObject finds the Object at remote. If it can't be found // it returns the error fs.ErrorObjectNotFound. -func (f *Fs) NewObject(remote string) (fs.Object, error) { - info, extension, exportName, exportMimeType, isDocument, err := f.getRemoteInfoWithExport(remote) +func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { + info, extension, exportName, exportMimeType, isDocument, err := f.getRemoteInfoWithExport(ctx, remote) if err != nil { return nil, err } @@ -1183,9 +1185,9 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) { } // FindLeaf finds a directory of name leaf in the folder with ID pathID -func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) { +func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) { // Find the leaf in pathID - found, err = f.list([]string{pathID}, leaf, true, false, false, func(item *drive.File) bool { + found, err = f.list(ctx, []string{pathID}, leaf, true, false, false, func(item *drive.File) bool { if !f.opt.SkipGdocs { _, exportName, _, isDocument := f.findExportFormat(item) if exportName == leaf { @@ -1206,7 +1208,7 @@ func (f *Fs) 
FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err er } // CreateDir makes a directory with pathID as parent and name leaf -func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) { +func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) { // fmt.Println("Making", path) // Define the metadata for the directory we are going to create. createInfo := &drive.File{ @@ -1368,18 +1370,18 @@ func (f *Fs) findImportFormat(mimeType string) string { // // This should return ErrDirNotFound if the directory isn't // found. -func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { - err = f.dirCache.FindRoot(false) +func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { + err = f.dirCache.FindRoot(ctx, false) if err != nil { return nil, err } - directoryID, err := f.dirCache.FindDir(dir, false) + directoryID, err := f.dirCache.FindDir(ctx, dir, false) if err != nil { return nil, err } var iErr error - _, err = f.list([]string{directoryID}, "", false, false, false, func(item *drive.File) bool { + _, err = f.list(ctx, []string{directoryID}, "", false, false, false, func(item *drive.File) bool { entry, err := f.itemToDirEntry(path.Join(dir, item.Name), item) if err != nil { iErr = err @@ -1432,7 +1434,7 @@ func (s listRSlices) Less(i, j int) bool { // In each cycle it will read up to grouping entries from the in channel without blocking. // If an error occurs it will be send to the out channel and then return. Once the in channel is closed, // nil is send to the out channel and the function returns. 
-func (f *Fs) listRRunner(wg *sync.WaitGroup, in <-chan listREntry, out chan<- error, cb func(fs.DirEntry) error, grouping int) { +func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in <-chan listREntry, out chan<- error, cb func(fs.DirEntry) error, grouping int) { var dirs []string var paths []string @@ -1453,7 +1455,7 @@ func (f *Fs) listRRunner(wg *sync.WaitGroup, in <-chan listREntry, out chan<- er } listRSlices{dirs, paths}.Sort() var iErr error - _, err := f.list(dirs, "", false, false, false, func(item *drive.File) bool { + _, err := f.list(ctx, dirs, "", false, false, false, func(item *drive.File) bool { for _, parent := range item.Parents { // only handle parents that are in the requested dirs list i := sort.SearchStrings(dirs, parent) @@ -1508,17 +1510,17 @@ func (f *Fs) listRRunner(wg *sync.WaitGroup, in <-chan listREntry, out chan<- er // // Don't implement this unless you have a more efficient way // of listing recursively that doing a directory traversal. -func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) { +func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { const ( grouping = 50 inputBuffer = 1000 ) - err = f.dirCache.FindRoot(false) + err = f.dirCache.FindRoot(ctx, false) if err != nil { return err } - directoryID, err := f.dirCache.FindDir(dir, false) + directoryID, err := f.dirCache.FindDir(ctx, dir, false) if err != nil { return err } @@ -1562,7 +1564,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) { in <- listREntry{directoryID, dir} for i := 0; i < fs.Config.Checkers; i++ { - go f.listRRunner(&wg, in, out, cb, grouping) + go f.listRRunner(ctx, &wg, in, out, cb, grouping) } go func() { // wait until the all directories are processed @@ -1636,8 +1638,8 @@ func (f *Fs) itemToDirEntry(remote string, item *drive.File) (fs.DirEntry, error // Creates a drive.File info from the parameters passed in. 
// // Used to create new objects -func (f *Fs) createFileInfo(remote string, modTime time.Time) (*drive.File, error) { - leaf, directoryID, err := f.dirCache.FindRootAndPath(remote, true) +func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Time) (*drive.File, error) { + leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true) if err != nil { return nil, err } @@ -1658,32 +1660,32 @@ func (f *Fs) createFileInfo(remote string, modTime time.Time) (*drive.File, erro // Copy the reader in to the new object which is returned // // The new object may have been created if an error is returned -func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { - exisitingObj, err := f.NewObject(src.Remote()) +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + exisitingObj, err := f.NewObject(ctx, src.Remote()) switch err { case nil: - return exisitingObj, exisitingObj.Update(in, src, options...) + return exisitingObj, exisitingObj.Update(ctx, in, src, options...) case fs.ErrorObjectNotFound: // Not found so create it - return f.PutUnchecked(in, src, options...) + return f.PutUnchecked(ctx, in, src, options...) default: return nil, err } } // PutStream uploads to the remote path with the modTime given of indeterminate size -func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { - return f.Put(in, src, options...) +func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + return f.Put(ctx, in, src, options...) } // PutUnchecked uploads the object // // This will create a duplicate if we upload a new file without // checking to see if there is one already - use Put() for that. 
-func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { +func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { remote := src.Remote() size := src.Size() - modTime := src.ModTime() + modTime := src.ModTime(ctx) srcMimeType := fs.MimeTypeFromName(remote) srcExt := path.Ext(remote) exportExt := "" @@ -1705,7 +1707,7 @@ func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOpt } } - createInfo, err := f.createFileInfo(remote, modTime) + createInfo, err := f.createFileInfo(ctx, remote, modTime) if err != nil { return nil, err } @@ -1741,7 +1743,7 @@ func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOpt // MergeDirs merges the contents of all the directories passed // in into the first one and rmdirs the other directories. -func (f *Fs) MergeDirs(dirs []fs.Directory) error { +func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error { if len(dirs) < 2 { return nil } @@ -1749,7 +1751,7 @@ func (f *Fs) MergeDirs(dirs []fs.Directory) error { for _, srcDir := range dirs[1:] { // list the the objects infos := []*drive.File{} - _, err := f.list([]string{srcDir.ID()}, "", false, false, true, func(info *drive.File) bool { + _, err := f.list(ctx, []string{srcDir.ID()}, "", false, false, true, func(info *drive.File) bool { infos = append(infos, info) return false }) @@ -1775,7 +1777,7 @@ func (f *Fs) MergeDirs(dirs []fs.Directory) error { } // rmdir (into trash) the now empty source directory fs.Infof(srcDir, "removing empty directory") - err = f.rmdir(srcDir.ID(), true) + err = f.rmdir(ctx, srcDir.ID(), true) if err != nil { return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir) } @@ -1784,19 +1786,19 @@ func (f *Fs) MergeDirs(dirs []fs.Directory) error { } // Mkdir creates the container if it doesn't exist -func (f *Fs) Mkdir(dir string) error { - err := f.dirCache.FindRoot(true) 
+func (f *Fs) Mkdir(ctx context.Context, dir string) error { + err := f.dirCache.FindRoot(ctx, true) if err != nil { return err } if dir != "" { - _, err = f.dirCache.FindDir(dir, true) + _, err = f.dirCache.FindDir(ctx, dir, true) } return err } // Rmdir deletes a directory unconditionally by ID -func (f *Fs) rmdir(directoryID string, useTrash bool) error { +func (f *Fs) rmdir(ctx context.Context, directoryID string, useTrash bool) error { return f.pacer.Call(func() (bool, error) { var err error if useTrash { @@ -1820,15 +1822,15 @@ func (f *Fs) rmdir(directoryID string, useTrash bool) error { // Rmdir deletes a directory // // Returns an error if it isn't empty -func (f *Fs) Rmdir(dir string) error { +func (f *Fs) Rmdir(ctx context.Context, dir string) error { root := path.Join(f.root, dir) dc := f.dirCache - directoryID, err := dc.FindDir(dir, false) + directoryID, err := dc.FindDir(ctx, dir, false) if err != nil { return err } var trashedFiles = false - found, err := f.list([]string{directoryID}, "", false, false, true, func(item *drive.File) bool { + found, err := f.list(ctx, []string{directoryID}, "", false, false, true, func(item *drive.File) bool { if !item.Trashed { fs.Debugf(dir, "Rmdir: contains file: %q", item.Name) return true @@ -1847,7 +1849,7 @@ func (f *Fs) Rmdir(dir string) error { // trash the directory if it had trashed files // in or the user wants to trash, otherwise // delete it. 
- err = f.rmdir(directoryID, trashedFiles || f.opt.UseTrash) + err = f.rmdir(ctx, directoryID, trashedFiles || f.opt.UseTrash) if err != nil { return err } @@ -1873,7 +1875,7 @@ func (f *Fs) Precision() time.Duration { // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy -func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { +func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { var srcObj *baseObject ext := "" switch src := src.(type) { @@ -1897,9 +1899,9 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { } // Look to see if there is an existing object - existingObject, _ := f.NewObject(remote) + existingObject, _ := f.NewObject(ctx, remote) - createInfo, err := f.createFileInfo(remote, src.ModTime()) + createInfo, err := f.createFileInfo(ctx, remote, src.ModTime(ctx)) if err != nil { return nil, err } @@ -1926,7 +1928,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { return nil, err } if existingObject != nil { - err = existingObject.Remove() + err = existingObject.Remove(ctx) if err != nil { fs.Errorf(existingObject, "Failed to remove existing object after copy: %v", err) } @@ -1939,11 +1941,11 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { // Optional interface: Only implement this if you have a way of // deleting all the files quicker than just running Remove() on the // result of List() -func (f *Fs) Purge() error { +func (f *Fs) Purge(ctx context.Context) error { if f.root == "" { return errors.New("can't purge root directory") } - err := f.dirCache.FindRoot(false) + err := f.dirCache.FindRoot(ctx, false) if err != nil { return err } @@ -1972,7 +1974,7 @@ func (f *Fs) Purge() error { } // CleanUp empties the trash -func (f *Fs) CleanUp() error { +func (f *Fs) CleanUp(ctx context.Context) error { err := f.pacer.Call(func() (bool, error) { err := f.svc.Files.EmptyTrash().Do() return 
shouldRetry(err) @@ -1985,7 +1987,7 @@ func (f *Fs) CleanUp() error { } // About gets quota information -func (f *Fs) About() (*fs.Usage, error) { +func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { if f.isTeamDrive { // Teamdrives don't appear to have a usage API so just return empty return &fs.Usage{}, nil @@ -2021,7 +2023,7 @@ func (f *Fs) About() (*fs.Usage, error) { // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantMove -func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { +func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { var srcObj *baseObject ext := "" switch src := src.(type) { @@ -2044,13 +2046,13 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { remote = remote[:len(remote)-len(ext)] } - _, srcParentID, err := srcObj.fs.dirCache.FindPath(src.Remote(), false) + _, srcParentID, err := srcObj.fs.dirCache.FindPath(ctx, src.Remote(), false) if err != nil { return nil, err } // Temporary Object under construction - dstInfo, err := f.createFileInfo(remote, src.ModTime()) + dstInfo, err := f.createFileInfo(ctx, remote, src.ModTime(ctx)) if err != nil { return nil, err } @@ -2095,13 +2097,13 @@ func (f *Fs) ShouldSupportTeamDrives(src fs.Object) (bool, error) { } // PublicLink adds a "readable by anyone with link" permission on the given file or folder. 
-func (f *Fs) PublicLink(remote string) (link string, err error) { - id, err := f.dirCache.FindDir(remote, false) +func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) { + id, err := f.dirCache.FindDir(ctx, remote, false) if err == nil { fs.Debugf(f, "attempting to share directory '%s'", remote) } else { fs.Debugf(f, "attempting to share single file '%s'", remote) - o, err := f.NewObject(remote) + o, err := f.NewObject(ctx, remote) if err != nil { return "", err } @@ -2137,7 +2139,7 @@ func (f *Fs) PublicLink(remote string) (link string, err error) { // If it isn't possible then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists -func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { +func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { srcFs, ok := src.(*Fs) if !ok { fs.Debugf(srcFs, "Can't move directory - not same remote type") @@ -2153,14 +2155,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { } // find the root src directory - err := srcFs.dirCache.FindRoot(false) + err := srcFs.dirCache.FindRoot(ctx, false) if err != nil { return err } // find the root dst directory if dstRemote != "" { - err = f.dirCache.FindRoot(true) + err = f.dirCache.FindRoot(ctx, true) if err != nil { return err } @@ -2176,14 +2178,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { if dstRemote == "" { findPath = f.root } - leaf, dstDirectoryID, err = f.dirCache.FindPath(findPath, true) + leaf, dstDirectoryID, err = f.dirCache.FindPath(ctx, findPath, true) if err != nil { return err } // Check destination does not exist if dstRemote != "" { - _, err = f.dirCache.FindDir(dstRemote, false) + _, err = f.dirCache.FindDir(ctx, dstRemote, false) if err == fs.ErrorDirNotFound { // OK } else if err != nil { @@ -2198,14 +2200,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { if srcRemote == "" { 
srcDirectoryID, err = srcFs.dirCache.RootParentID() } else { - _, srcDirectoryID, err = srcFs.dirCache.FindPath(srcRemote, false) + _, srcDirectoryID, err = srcFs.dirCache.FindPath(ctx, srcRemote, false) } if err != nil { return err } // Find ID of src - srcID, err := srcFs.dirCache.FindDir(srcRemote, false) + srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false) if err != nil { return err } @@ -2236,7 +2238,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { // Automatically restarts itself in case of unexpected behavior of the remote. // // Close the returned channel to stop being notified. -func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) { +func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) { go func() { // get the StartPageToken early so all changes from now on get processed startPageToken, err := f.changeNotifyStartPageToken() @@ -2411,13 +2413,13 @@ func (o *baseObject) Remote() string { } // Hash returns the Md5sum of an object returning a lowercase hex string -func (o *Object) Hash(t hash.Type) (string, error) { +func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { if t != hash.MD5 { return "", hash.ErrUnsupported } return o.md5sum, nil } -func (o *baseObject) Hash(t hash.Type) (string, error) { +func (o *baseObject) Hash(ctx context.Context, t hash.Type) (string, error) { if t != hash.MD5 { return "", hash.ErrUnsupported } @@ -2430,15 +2432,15 @@ func (o *baseObject) Size() int64 { } // getRemoteInfo returns a drive.File for the remote -func (f *Fs) getRemoteInfo(remote string) (info *drive.File, err error) { - info, _, _, _, _, err = f.getRemoteInfoWithExport(remote) +func (f *Fs) getRemoteInfo(ctx context.Context, remote string) (info *drive.File, err error) { + info, _, _, _, _, err = f.getRemoteInfoWithExport(ctx, remote) return } // getRemoteInfoWithExport returns a 
drive.File and the export settings for the remote -func (f *Fs) getRemoteInfoWithExport(remote string) ( +func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) ( info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error) { - leaf, directoryID, err := f.dirCache.FindRootAndPath(remote, false) + leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, false) if err != nil { if err == fs.ErrorDirNotFound { return nil, "", "", "", false, fs.ErrorObjectNotFound @@ -2446,7 +2448,7 @@ func (f *Fs) getRemoteInfoWithExport(remote string) ( return nil, "", "", "", false, err } - found, err := f.list([]string{directoryID}, leaf, false, true, false, func(item *drive.File) bool { + found, err := f.list(ctx, []string{directoryID}, leaf, false, true, false, func(item *drive.File) bool { if !f.opt.SkipGdocs { extension, exportName, exportMimeType, isDocument = f.findExportFormat(item) if exportName == leaf { @@ -2477,7 +2479,7 @@ func (f *Fs) getRemoteInfoWithExport(remote string) ( // // It attempts to read the objects mtime and if that isn't present the // LastModified returned in the http headers -func (o *baseObject) ModTime() time.Time { +func (o *baseObject) ModTime(ctx context.Context) time.Time { modTime, err := time.Parse(timeFormatIn, o.modifiedDate) if err != nil { fs.Debugf(o, "Failed to read mtime from object: %v", err) @@ -2487,7 +2489,7 @@ func (o *baseObject) ModTime() time.Time { } // SetModTime sets the modification time of the drive fs object -func (o *baseObject) SetModTime(modTime time.Time) error { +func (o *baseObject) SetModTime(ctx context.Context, modTime time.Time) error { // New metadata updateInfo := &drive.File{ ModifiedTime: modTime.Format(timeFormatOut), @@ -2620,7 +2622,7 @@ func (o *baseObject) open(url string, options ...fs.OpenOption) (in io.ReadClose } // Open an object for read -func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { +func (o *Object) Open(ctx 
context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { if o.v2Download { var v2File *drive_v2.File err = o.fs.pacer.Call(func() (bool, error) { @@ -2638,7 +2640,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { } return o.baseObject.open(o.url, options...) } -func (o *documentObject) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { +func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { // Update the size with what we are reading as it can change from // the HEAD in the listing to this GET. This stops rclone marking // the transfer as corrupted. @@ -2670,7 +2672,7 @@ func (o *documentObject) Open(options ...fs.OpenOption) (in io.ReadCloser, err e } return } -func (o *linkObject) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { +func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { var offset, limit int64 = 0, -1 var data = o.content for _, option := range options { @@ -2722,11 +2724,11 @@ func (o *baseObject) update(updateInfo *drive.File, uploadMimeType string, in io // Copy the reader into the object updating modTime and size // // The new object may have been created if an error is returned -func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { - srcMimeType := fs.MimeType(src) +func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { + srcMimeType := fs.MimeType(ctx, src) updateInfo := &drive.File{ MimeType: srcMimeType, - ModifiedTime: src.ModTime().Format(timeFormatOut), + ModifiedTime: src.ModTime(ctx).Format(timeFormatOut), } info, err := o.baseObject.update(updateInfo, srcMimeType, in, src) if err != nil { @@ -2745,12 +2747,12 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio return nil } -func (o *documentObject) Update(in io.Reader, src 
fs.ObjectInfo, options ...fs.OpenOption) error { - srcMimeType := fs.MimeType(src) +func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { + srcMimeType := fs.MimeType(ctx, src) importMimeType := "" updateInfo := &drive.File{ MimeType: srcMimeType, - ModifiedTime: src.ModTime().Format(timeFormatOut), + ModifiedTime: src.ModTime(ctx).Format(timeFormatOut), } if o.fs.importMimeTypes == nil || o.fs.opt.SkipGdocs { @@ -2787,12 +2789,12 @@ func (o *documentObject) Update(in io.Reader, src fs.ObjectInfo, options ...fs.O return nil } -func (o *linkObject) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { +func (o *linkObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { return errors.New("cannot update link files") } // Remove an object -func (o *baseObject) Remove() error { +func (o *baseObject) Remove(ctx context.Context) error { var err error err = o.fs.pacer.Call(func() (bool, error) { if o.fs.opt.UseTrash { @@ -2815,7 +2817,7 @@ func (o *baseObject) Remove() error { } // MimeType of an Object if known, "" otherwise -func (o *baseObject) MimeType() string { +func (o *baseObject) MimeType(ctx context.Context) string { return o.mimeType } diff --git a/backend/drive/drive_internal_test.go b/backend/drive/drive_internal_test.go index 542fd5538..65c84fe0e 100644 --- a/backend/drive/drive_internal_test.go +++ b/backend/drive/drive_internal_test.go @@ -2,6 +2,7 @@ package drive import ( "bytes" + "context" "encoding/json" "io" "io/ioutil" @@ -195,7 +196,7 @@ func (f *Fs) InternalTestDocumentImport(t *testing.T) { _, f.importMimeTypes, err = parseExtensions("odt,ods,doc") require.NoError(t, err) - err = operations.CopyFile(f, testFilesFs, "example2.doc", "example2.doc") + err = operations.CopyFile(context.Background(), f, testFilesFs, "example2.doc", "example2.doc") require.NoError(t, err) } @@ -209,7 +210,7 @@ func (f *Fs) 
InternalTestDocumentUpdate(t *testing.T) { _, f.importMimeTypes, err = parseExtensions("odt,ods,doc") require.NoError(t, err) - err = operations.CopyFile(f, testFilesFs, "example2.xlsx", "example1.ods") + err = operations.CopyFile(context.Background(), f, testFilesFs, "example2.xlsx", "example1.ods") require.NoError(t, err) } @@ -220,10 +221,10 @@ func (f *Fs) InternalTestDocumentExport(t *testing.T) { f.exportExtensions, _, err = parseExtensions("txt") require.NoError(t, err) - obj, err := f.NewObject("example2.txt") + obj, err := f.NewObject(context.Background(), "example2.txt") require.NoError(t, err) - rc, err := obj.Open() + rc, err := obj.Open(context.Background()) require.NoError(t, err) defer func() { require.NoError(t, rc.Close()) }() @@ -246,10 +247,10 @@ func (f *Fs) InternalTestDocumentLink(t *testing.T) { f.exportExtensions, _, err = parseExtensions("link.html") require.NoError(t, err) - obj, err := f.NewObject("example2.link.html") + obj, err := f.NewObject(context.Background(), "example2.link.html") require.NoError(t, err) - rc, err := obj.Open() + rc, err := obj.Open(context.Background()) require.NoError(t, err) defer func() { require.NoError(t, rc.Close()) }() diff --git a/backend/dropbox/dropbox.go b/backend/dropbox/dropbox.go index 7d4748d97..9d2f8b82c 100644 --- a/backend/dropbox/dropbox.go +++ b/backend/dropbox/dropbox.go @@ -22,6 +22,7 @@ of path_display and all will be well. */ import ( + "context" "fmt" "io" "log" @@ -441,7 +442,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *files.FileMetadata) (fs.Obje // NewObject finds the Object at remote. If it can't be found // it returns the error fs.ErrorObjectNotFound. 
-func (f *Fs) NewObject(remote string) (fs.Object, error) { +func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { return f.newObjectWithInfo(remote, nil) } @@ -454,7 +455,7 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) { // // This should return ErrDirNotFound if the directory isn't // found. -func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { +func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { root := f.slashRoot if dir != "" { root += "/" + dir @@ -541,22 +542,22 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { // Copy the reader in to the new object which is returned // // The new object may have been created if an error is returned -func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { // Temporary Object under construction o := &Object{ fs: f, remote: src.Remote(), } - return o, o.Update(in, src, options...) + return o, o.Update(ctx, in, src, options...) } // PutStream uploads to the remote path with the modTime given of indeterminate size -func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { - return f.Put(in, src, options...) +func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + return f.Put(ctx, in, src, options...) 
} // Mkdir creates the container if it doesn't exist -func (f *Fs) Mkdir(dir string) error { +func (f *Fs) Mkdir(ctx context.Context, dir string) error { root := path.Join(f.slashRoot, dir) // can't create or run metadata on root @@ -586,7 +587,7 @@ func (f *Fs) Mkdir(dir string) error { // Rmdir deletes the container // // Returns an error if it isn't empty -func (f *Fs) Rmdir(dir string) error { +func (f *Fs) Rmdir(ctx context.Context, dir string) error { root := path.Join(f.slashRoot, dir) // can't remove root @@ -642,7 +643,7 @@ func (f *Fs) Precision() time.Duration { // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy -func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { +func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't copy - not same remote type") @@ -687,7 +688,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { // Optional interface: Only implement this if you have a way of // deleting all the files quicker than just running Remove() on the // result of List() -func (f *Fs) Purge() (err error) { +func (f *Fs) Purge(ctx context.Context) (err error) { // Let dropbox delete the filesystem tree err = f.pacer.Call(func() (bool, error) { _, err = f.srv.DeleteV2(&files.DeleteArg{Path: f.slashRoot}) @@ -705,7 +706,7 @@ func (f *Fs) Purge() (err error) { // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantMove -func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { +func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't move - not same remote type") @@ -745,7 +746,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { } // PublicLink adds a "readable by anyone with link" permission on the given 
file or folder. -func (f *Fs) PublicLink(remote string) (link string, err error) { +func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) { absPath := "/" + path.Join(f.Root(), remote) fs.Debugf(f, "attempting to share '%s' (absolute path: %s)", remote, absPath) createArg := sharing.CreateSharedLinkWithSettingsArg{ @@ -798,7 +799,7 @@ func (f *Fs) PublicLink(remote string) (link string, err error) { // If it isn't possible then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists -func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { +func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { srcFs, ok := src.(*Fs) if !ok { fs.Debugf(srcFs, "Can't move directory - not same remote type") @@ -834,7 +835,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { } // About gets quota information -func (f *Fs) About() (usage *fs.Usage, err error) { +func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) { var q *users.SpaceUsage err = f.pacer.Call(func() (bool, error) { q, err = f.users.GetSpaceUsage() @@ -886,7 +887,7 @@ func (o *Object) Remote() string { } // Hash returns the dropbox special hash -func (o *Object) Hash(t hash.Type) (string, error) { +func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { if t != hash.Dropbox { return "", hash.ErrUnsupported } @@ -948,7 +949,7 @@ func (o *Object) readMetaData() (err error) { // // It attempts to read the objects mtime and if that isn't present the // LastModified returned in the http headers -func (o *Object) ModTime() time.Time { +func (o *Object) ModTime(ctx context.Context) time.Time { err := o.readMetaData() if err != nil { fs.Debugf(o, "Failed to read metadata: %v", err) @@ -960,7 +961,7 @@ func (o *Object) ModTime() time.Time { // SetModTime sets the modification time of the local fs object // // Commits the datastore -func (o *Object) SetModTime(modTime 
time.Time) error { +func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { // Dropbox doesn't have a way of doing this so returning this // error will cause the file to be deleted first then // re-uploaded to set the time. @@ -973,7 +974,7 @@ func (o *Object) Storable() bool { } // Open an object for read -func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { +func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { headers := fs.OpenOptionHeaders(options) arg := files.DownloadArg{Path: o.remotePath(), ExtraHeaders: headers} err = o.fs.pacer.Call(func() (bool, error) { @@ -1099,7 +1100,7 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size // Copy the reader into the object updating modTime and size // // The new object may have been created if an error is returned -func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { +func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { remote := o.remotePath() if ignoredFiles.MatchString(remote) { fs.Logf(o, "File name disallowed - not uploading") @@ -1108,7 +1109,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio commitInfo := files.NewCommitInfo(o.remotePath()) commitInfo.Mode.Tag = "overwrite" // The Dropbox API only accepts timestamps in UTC with second precision. 
- commitInfo.ClientModified = src.ModTime().UTC().Round(time.Second) + commitInfo.ClientModified = src.ModTime(ctx).UTC().Round(time.Second) size := src.Size() var err error @@ -1128,7 +1129,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio } // Remove an object -func (o *Object) Remove() (err error) { +func (o *Object) Remove(ctx context.Context) (err error) { err = o.fs.pacer.Call(func() (bool, error) { _, err = o.fs.srv.DeleteV2(&files.DeleteArg{Path: o.remotePath()}) return shouldRetry(err) diff --git a/backend/ftp/ftp.go b/backend/ftp/ftp.go index 9ed69dc91..dd6230c1f 100644 --- a/backend/ftp/ftp.go +++ b/backend/ftp/ftp.go @@ -2,6 +2,7 @@ package ftp import ( + "context" "crypto/tls" "io" "net/textproto" @@ -202,6 +203,7 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) { // NewFs constructs an Fs from the path, container:path func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) { + ctx := context.Background() // defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err) // Parse config into Options struct opt := new(Options) @@ -254,7 +256,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) { if f.root == "." { f.root = "" } - _, err := f.NewObject(remote) + _, err := f.NewObject(ctx, remote) if err != nil { if err == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile { // File doesn't exist so return old f @@ -319,7 +321,7 @@ func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) { // NewObject finds the Object at remote. If it can't be found // it returns the error fs.ErrorObjectNotFound. 
-func (f *Fs) NewObject(remote string) (o fs.Object, err error) { +func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) { // defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err) entry, err := f.findItem(remote) if err != nil { @@ -363,7 +365,7 @@ func (f *Fs) dirExists(remote string) (exists bool, err error) { // // This should return ErrDirNotFound if the directory isn't // found. -func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { +func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { // defer fs.Trace(dir, "curlevel=%d", curlevel)("") c, err := f.getFtpConnection() if err != nil { @@ -453,7 +455,7 @@ func (f *Fs) Precision() time.Duration { // May create the object even if it returns an error - if so // will return the object and the error, otherwise will return // nil and the error -func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { // fs.Debugf(f, "Trying to put file %s", src.Remote()) err := f.mkParentDir(src.Remote()) if err != nil { @@ -463,13 +465,13 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs. fs: f, remote: src.Remote(), } - err = o.Update(in, src, options...) + err = o.Update(ctx, in, src, options...) return o, err } // PutStream uploads to the remote path with the modTime given of indeterminate size -func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { - return f.Put(in, src, options...) +func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + return f.Put(ctx, in, src, options...) 
} // getInfo reads the FileInfo for a path @@ -547,7 +549,7 @@ func (f *Fs) mkParentDir(remote string) error { } // Mkdir creates the directory if it doesn't exist -func (f *Fs) Mkdir(dir string) (err error) { +func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) { // defer fs.Trace(dir, "")("err=%v", &err) root := path.Join(f.root, dir) return f.mkdir(root) @@ -556,7 +558,7 @@ func (f *Fs) Mkdir(dir string) (err error) { // Rmdir removes the directory (container, bucket) if empty // // Return an error if it doesn't exist or isn't empty -func (f *Fs) Rmdir(dir string) error { +func (f *Fs) Rmdir(ctx context.Context, dir string) error { c, err := f.getFtpConnection() if err != nil { return errors.Wrap(translateErrorFile(err), "Rmdir") @@ -567,7 +569,7 @@ func (f *Fs) Rmdir(dir string) error { } // Move renames a remote file object -func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { +func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't move - not same remote type") @@ -589,7 +591,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { if err != nil { return nil, errors.Wrap(err, "Move Rename failed") } - dstObj, err := f.NewObject(remote) + dstObj, err := f.NewObject(ctx, remote) if err != nil { return nil, errors.Wrap(err, "Move NewObject failed") } @@ -604,7 +606,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { // If it isn't possible then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists -func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { +func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { srcFs, ok := src.(*Fs) if !ok { fs.Debugf(srcFs, "Can't move directory - not same remote type") @@ -667,7 +669,7 @@ func (o *Object) Remote() string { } // Hash returns the hash of an object returning a lowercase hex string 
-func (o *Object) Hash(t hash.Type) (string, error) { +func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { return "", hash.ErrUnsupported } @@ -677,12 +679,12 @@ func (o *Object) Size() int64 { } // ModTime returns the modification time of the object -func (o *Object) ModTime() time.Time { +func (o *Object) ModTime(ctx context.Context) time.Time { return o.info.ModTime } // SetModTime sets the modification time of the object -func (o *Object) SetModTime(modTime time.Time) error { +func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { return nil } @@ -743,7 +745,7 @@ func (f *ftpReadCloser) Close() error { } // Open an object for read -func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) { +func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) { // defer fs.Trace(o, "")("rc=%v, err=%v", &rc, &err) path := path.Join(o.fs.root, o.remote) var offset, limit int64 = 0, -1 @@ -777,7 +779,7 @@ func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) { // Copy the reader into the object updating modTime and size // // The new object may have been created if an error is returned -func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { +func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { // defer fs.Trace(o, "src=%v", src)("err=%v", &err) path := path.Join(o.fs.root, o.remote) // remove the file if upload failed @@ -787,7 +789,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio // may still be dealing with it for a moment. 
A sleep isn't ideal but I haven't been // able to think of a better method to find out if the server has finished - ncw time.Sleep(1 * time.Second) - removeErr := o.Remove() + removeErr := o.Remove(ctx) if removeErr != nil { fs.Debugf(o, "Failed to remove: %v", removeErr) } else { @@ -813,7 +815,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio } // Remove an object -func (o *Object) Remove() (err error) { +func (o *Object) Remove(ctx context.Context) (err error) { // defer fs.Trace(o, "")("err=%v", &err) path := path.Join(o.fs.root, o.remote) // Check if it's a directory or a file @@ -822,7 +824,7 @@ func (o *Object) Remove() (err error) { return err } if info.IsDir { - err = o.fs.Rmdir(o.remote) + err = o.fs.Rmdir(ctx, o.remote) } else { c, err := o.fs.getFtpConnection() if err != nil { diff --git a/backend/googlecloudstorage/googlecloudstorage.go b/backend/googlecloudstorage/googlecloudstorage.go index 7a5482442..9b613529f 100644 --- a/backend/googlecloudstorage/googlecloudstorage.go +++ b/backend/googlecloudstorage/googlecloudstorage.go @@ -473,7 +473,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *storage.Object) (fs.Object, // NewObject finds the Object at remote. If it can't be found // it returns the error fs.ErrorObjectNotFound. 
-func (f *Fs) NewObject(remote string) (fs.Object, error) { +func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { return f.newObjectWithInfo(remote, nil) } @@ -485,7 +485,7 @@ type listFn func(remote string, object *storage.Object, isDirectory bool) error // dir is the starting directory, "" for root // // Set recurse to read sub directories -func (f *Fs) list(dir string, recurse bool, fn listFn) (err error) { +func (f *Fs) list(ctx context.Context, dir string, recurse bool, fn listFn) (err error) { root := f.root rootLength := len(root) if dir != "" { @@ -574,9 +574,9 @@ func (f *Fs) markBucketOK() { } // listDir lists a single directory -func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) { +func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) { // List the objects - err = f.list(dir, false, func(remote string, object *storage.Object, isDirectory bool) error { + err = f.list(ctx, dir, false, func(remote string, object *storage.Object, isDirectory bool) error { entry, err := f.itemToDirEntry(remote, object, isDirectory) if err != nil { return err @@ -633,11 +633,11 @@ func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) { // // This should return ErrDirNotFound if the directory isn't // found. -func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { +func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { if f.bucket == "" { return f.listBuckets(dir) } - return f.listDir(dir) + return f.listDir(ctx, dir) } // ListR lists the objects and directories of the Fs starting @@ -656,12 +656,12 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { // // Don't implement this unless you have a more efficient way // of listing recursively that doing a directory traversal. 
-func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) { +func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { if f.bucket == "" { return fs.ErrorListBucketRequired } list := walk.NewListRHelper(callback) - err = f.list(dir, true, func(remote string, object *storage.Object, isDirectory bool) error { + err = f.list(ctx, dir, true, func(remote string, object *storage.Object, isDirectory bool) error { entry, err := f.itemToDirEntry(remote, object, isDirectory) if err != nil { return err @@ -681,22 +681,22 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) { // Copy the reader in to the new object which is returned // // The new object may have been created if an error is returned -func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { // Temporary Object under construction o := &Object{ fs: f, remote: src.Remote(), } - return o, o.Update(in, src, options...) + return o, o.Update(ctx, in, src, options...) } // PutStream uploads to the remote path with the modTime given of indeterminate size -func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { - return f.Put(in, src, options...) +func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + return f.Put(ctx, in, src, options...) } // Mkdir creates the bucket if it doesn't exist -func (f *Fs) Mkdir(dir string) (err error) { +func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) { f.bucketOKMu.Lock() defer f.bucketOKMu.Unlock() if f.bucketOK { @@ -755,7 +755,7 @@ func (f *Fs) Mkdir(dir string) (err error) { // // Returns an error if it isn't empty: Error 409: The bucket you tried // to delete was not empty. 
-func (f *Fs) Rmdir(dir string) (err error) { +func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) { f.bucketOKMu.Lock() defer f.bucketOKMu.Unlock() if f.root != "" || dir != "" { @@ -785,8 +785,8 @@ func (f *Fs) Precision() time.Duration { // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy -func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { - err := f.Mkdir("") +func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { + err := f.Mkdir(ctx, "") if err != nil { return nil, err } @@ -845,7 +845,7 @@ func (o *Object) Remote() string { } // Hash returns the Md5sum of an object returning a lowercase hex string -func (o *Object) Hash(t hash.Type) (string, error) { +func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { if t != hash.MD5 { return "", hash.ErrUnsupported } @@ -919,7 +919,7 @@ func (o *Object) readMetaData() (err error) { // // It attempts to read the objects mtime and if that isn't present the // LastModified returned in the http headers -func (o *Object) ModTime() time.Time { +func (o *Object) ModTime(ctx context.Context) time.Time { err := o.readMetaData() if err != nil { // fs.Logf(o, "Failed to read metadata: %v", err) @@ -936,7 +936,7 @@ func metadataFromModTime(modTime time.Time) map[string]string { } // SetModTime sets the modification time of the local fs object -func (o *Object) SetModTime(modTime time.Time) (err error) { +func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) { // This only adds metadata so will perserve other metadata object := storage.Object{ Bucket: o.fs.bucket, @@ -961,7 +961,7 @@ func (o *Object) Storable() bool { } // Open an object for read -func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { +func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { req, err := http.NewRequest("GET", o.url, 
nil) if err != nil { return nil, err @@ -992,17 +992,17 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { // Update the object with the contents of the io.Reader, modTime and size // // The new object may have been created if an error is returned -func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { - err := o.fs.Mkdir("") +func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { + err := o.fs.Mkdir(ctx, "") if err != nil { return err } - modTime := src.ModTime() + modTime := src.ModTime(ctx) object := storage.Object{ Bucket: o.fs.bucket, Name: o.fs.root + o.remote, - ContentType: fs.MimeType(src), + ContentType: fs.MimeType(ctx, src), Metadata: metadataFromModTime(modTime), } var newObject *storage.Object @@ -1023,7 +1023,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio } // Remove an object -func (o *Object) Remove() (err error) { +func (o *Object) Remove(ctx context.Context) (err error) { err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.svc.Objects.Delete(o.fs.bucket, o.fs.root+o.remote).Do() return shouldRetry(err) @@ -1032,7 +1032,7 @@ func (o *Object) Remove() (err error) { } // MimeType of an Object if known, "" otherwise -func (o *Object) MimeType() string { +func (o *Object) MimeType(ctx context.Context) string { return o.mimeType } diff --git a/backend/http/http.go b/backend/http/http.go index 3f897f737..da5e412aa 100644 --- a/backend/http/http.go +++ b/backend/http/http.go @@ -5,6 +5,7 @@ package http import ( + "context" "io" "mime" "net/http" @@ -207,7 +208,7 @@ func (f *Fs) Precision() time.Duration { } // NewObject creates a new remote http file object -func (f *Fs) NewObject(remote string) (fs.Object, error) { +func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { o := &Object{ fs: f, remote: remote, @@ -359,7 +360,7 @@ func (f *Fs) readDir(dir string) 
(names []string, err error) { // // This should return ErrDirNotFound if the directory isn't // found. -func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { +func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { if !strings.HasSuffix(dir, "/") && dir != "" { dir += "/" } @@ -399,12 +400,12 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { // May create the object even if it returns an error - if so // will return the object and the error, otherwise will return // nil and the error -func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { return nil, errorReadOnly } // PutStream uploads to the remote path with the modTime given of indeterminate size -func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { +func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { return nil, errorReadOnly } @@ -427,7 +428,7 @@ func (o *Object) Remote() string { } // Hash returns "" since HTTP (in Go or OpenSSH) doesn't support remote calculation of hashes -func (o *Object) Hash(r hash.Type) (string, error) { +func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) { return "", hash.ErrUnsupported } @@ -437,7 +438,7 @@ func (o *Object) Size() int64 { } // ModTime returns the modification time of the remote http file -func (o *Object) ModTime() time.Time { +func (o *Object) ModTime(ctx context.Context) time.Time { return o.modTime } @@ -480,7 +481,7 @@ func (o *Object) stat() error { // SetModTime sets the modification and access time to the specified time // // it also updates the info field -func (o *Object) SetModTime(modTime time.Time) error { +func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { return 
errorReadOnly } @@ -490,7 +491,7 @@ func (o *Object) Storable() bool { } // Open a remote http file object for reading. Seek is supported -func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { +func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { url := o.url() req, err := http.NewRequest("GET", url, nil) if err != nil { @@ -517,27 +518,27 @@ func (f *Fs) Hashes() hash.Set { } // Mkdir makes the root directory of the Fs object -func (f *Fs) Mkdir(dir string) error { +func (f *Fs) Mkdir(ctx context.Context, dir string) error { return errorReadOnly } // Remove a remote http file object -func (o *Object) Remove() error { +func (o *Object) Remove(ctx context.Context) error { return errorReadOnly } // Rmdir removes the root directory of the Fs object -func (f *Fs) Rmdir(dir string) error { +func (f *Fs) Rmdir(ctx context.Context, dir string) error { return errorReadOnly } // Update in to the object with the modTime given of the given size -func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { +func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { return errorReadOnly } // MimeType of an Object if known, "" otherwise -func (o *Object) MimeType() string { +func (o *Object) MimeType(ctx context.Context) string { return o.contentType } diff --git a/backend/http/http_internal_test.go b/backend/http/http_internal_test.go index 2c489879d..6c3505236 100644 --- a/backend/http/http_internal_test.go +++ b/backend/http/http_internal_test.go @@ -1,6 +1,7 @@ package http import ( + "context" "fmt" "io/ioutil" "net/http" @@ -64,7 +65,7 @@ func prepare(t *testing.T) (fs.Fs, func()) { } func testListRoot(t *testing.T, f fs.Fs, noSlash bool) { - entries, err := f.List("") + entries, err := f.List(context.Background(), "") require.NoError(t, err) sort.Sort(entries) @@ -120,7 +121,7 @@ func TestListSubDir(t *testing.T) { f, 
tidy := prepare(t) defer tidy() - entries, err := f.List("three") + entries, err := f.List(context.Background(), "three") require.NoError(t, err) sort.Sort(entries) @@ -138,7 +139,7 @@ func TestNewObject(t *testing.T) { f, tidy := prepare(t) defer tidy() - o, err := f.NewObject("four/under four.txt") + o, err := f.NewObject(context.Background(), "four/under four.txt") require.NoError(t, err) assert.Equal(t, "four/under four.txt", o.Remote()) @@ -148,7 +149,7 @@ func TestNewObject(t *testing.T) { // Test the time is correct on the object - tObj := o.ModTime() + tObj := o.ModTime(context.Background()) fi, err := os.Stat(filepath.Join(filesPath, "four", "under four.txt")) require.NoError(t, err) @@ -158,7 +159,7 @@ func TestNewObject(t *testing.T) { assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", o.Remote(), dt, time.Second, tObj, tFile, time.Second)) // check object not found - o, err = f.NewObject("not found.txt") + o, err = f.NewObject(context.Background(), "not found.txt") assert.Nil(t, o) assert.Equal(t, fs.ErrorObjectNotFound, err) } @@ -167,11 +168,11 @@ func TestOpen(t *testing.T) { f, tidy := prepare(t) defer tidy() - o, err := f.NewObject("four/under four.txt") + o, err := f.NewObject(context.Background(), "four/under four.txt") require.NoError(t, err) // Test normal read - fd, err := o.Open() + fd, err := o.Open(context.Background()) require.NoError(t, err) data, err := ioutil.ReadAll(fd) require.NoError(t, err) @@ -179,7 +180,7 @@ func TestOpen(t *testing.T) { assert.Equal(t, "beetroot\n", string(data)) // Test with range request - fd, err = o.Open(&fs.RangeOption{Start: 1, End: 5}) + fd, err = o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5}) require.NoError(t, err) data, err = ioutil.ReadAll(fd) require.NoError(t, err) @@ -191,12 +192,12 @@ func TestMimeType(t *testing.T) { f, tidy := prepare(t) defer tidy() - o, err := f.NewObject("four/under four.txt") + o, err := 
f.NewObject(context.Background(), "four/under four.txt") require.NoError(t, err) do, ok := o.(fs.MimeTyper) require.True(t, ok) - assert.Equal(t, "text/plain; charset=utf-8", do.MimeType()) + assert.Equal(t, "text/plain; charset=utf-8", do.MimeType(context.Background())) } func TestIsAFileRoot(t *testing.T) { @@ -216,7 +217,7 @@ func TestIsAFileSubDir(t *testing.T) { f, err := NewFs(remoteName, "three/underthree.txt", m) assert.Equal(t, err, fs.ErrorIsFile) - entries, err := f.List("") + entries, err := f.List(context.Background(), "") require.NoError(t, err) sort.Sort(entries) diff --git a/backend/jottacloud/jottacloud.go b/backend/jottacloud/jottacloud.go index 672aa6518..e4d4bd6de 100644 --- a/backend/jottacloud/jottacloud.go +++ b/backend/jottacloud/jottacloud.go @@ -2,6 +2,7 @@ package jottacloud import ( "bytes" + "context" "crypto/md5" "encoding/hex" "fmt" @@ -542,7 +543,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { if f.root == "." { f.root = "" } - _, err := f.NewObject(remote) + _, err := f.NewObject(context.TODO(), remote) if err != nil { if errors.Cause(err) == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile { // File doesn't exist so return old f @@ -580,7 +581,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.JottaFile) (fs.Object, e // NewObject finds the Object at remote. If it can't be found // it returns the error fs.ErrorObjectNotFound. -func (f *Fs) NewObject(remote string) (fs.Object, error) { +func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { return f.newObjectWithInfo(remote, nil) } @@ -617,7 +618,7 @@ func (f *Fs) CreateDir(path string) (jf *api.JottaFolder, err error) { // // This should return ErrDirNotFound if the directory isn't // found. 
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { +func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { //fmt.Printf("List: %s\n", f.filePath(dir)) opts := rest.Opts{ Method: "GET", @@ -734,7 +735,7 @@ func (f *Fs) listFileDir(remoteStartPath string, startFolder *api.JottaFolder, f // // Don't implement this unless you have a more efficient way // of listing recursively that doing a directory traversal. -func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) { +func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { opts := rest.Opts{ Method: "GET", Path: f.filePath(dir), @@ -787,17 +788,17 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje // Copy the reader in to the new object which is returned // // The new object may have been created if an error is returned -func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { if f.opt.Device != "Jotta" { return nil, errors.New("upload not supported for devices other than Jotta") } - o := f.createObject(src.Remote(), src.ModTime(), src.Size()) - return o, o.Update(in, src, options...) + o := f.createObject(src.Remote(), src.ModTime(ctx), src.Size()) + return o, o.Update(ctx, in, src, options...) } // mkParentDir makes the parent of the native path dirPath if // necessary and any directories above that -func (f *Fs) mkParentDir(dirPath string) error { +func (f *Fs) mkParentDir(ctx context.Context, dirPath string) error { // defer log.Trace(dirPath, "")("") // chop off trailing / if it exists if strings.HasSuffix(dirPath, "/") { @@ -807,25 +808,25 @@ func (f *Fs) mkParentDir(dirPath string) error { if parent == "." 
{ parent = "" } - return f.Mkdir(parent) + return f.Mkdir(ctx, parent) } // Mkdir creates the container if it doesn't exist -func (f *Fs) Mkdir(dir string) error { +func (f *Fs) Mkdir(ctx context.Context, dir string) error { _, err := f.CreateDir(dir) return err } // purgeCheck removes the root directory, if check is set then it // refuses to do so if it has anything in -func (f *Fs) purgeCheck(dir string, check bool) (err error) { +func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error) { root := path.Join(f.root, dir) if root == "" { return errors.New("can't purge root directory") } // check that the directory exists - entries, err := f.List(dir) + entries, err := f.List(ctx, dir) if err != nil { return err } @@ -865,8 +866,8 @@ func (f *Fs) purgeCheck(dir string, check bool) (err error) { // Rmdir deletes the root folder // // Returns an error if it isn't empty -func (f *Fs) Rmdir(dir string) error { - return f.purgeCheck(dir, true) +func (f *Fs) Rmdir(ctx context.Context, dir string) error { + return f.purgeCheck(ctx, dir, true) } // Precision return the precision of this Fs @@ -879,8 +880,8 @@ func (f *Fs) Precision() time.Duration { // Optional interface: Only implement this if you have a way of // deleting all the files quicker than just running Remove() on the // result of List() -func (f *Fs) Purge() error { - return f.purgeCheck("", false) +func (f *Fs) Purge(ctx context.Context) error { + return f.purgeCheck(ctx, "", false) } // copyOrMoves copies or moves directories or files depending on the method parameter @@ -913,14 +914,14 @@ func (f *Fs) copyOrMove(method, src, dest string) (info *api.JottaFile, err erro // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy -func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { +func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, 
"Can't copy - not same remote type") return nil, fs.ErrorCantMove } - err := f.mkParentDir(remote) + err := f.mkParentDir(ctx, remote) if err != nil { return nil, err } @@ -943,14 +944,14 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantMove -func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { +func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't move - not same remote type") return nil, fs.ErrorCantMove } - err := f.mkParentDir(remote) + err := f.mkParentDir(ctx, remote) if err != nil { return nil, err } @@ -972,7 +973,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { // If it isn't possible then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists -func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { +func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { srcFs, ok := src.(*Fs) if !ok { fs.Debugf(srcFs, "Can't move directory - not same remote type") @@ -989,7 +990,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { //fmt.Printf("Move src: %s (FullPath %s), dst: %s (FullPath: %s)\n", srcRemote, srcPath, dstRemote, dstPath) var err error - _, err = f.List(dstRemote) + _, err = f.List(ctx, dstRemote) if err == fs.ErrorDirNotFound { // OK } else if err != nil { @@ -1007,7 +1008,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { } // PublicLink generates a public link to the remote path (usually readable by anyone) -func (f *Fs) PublicLink(remote string) (link string, err error) { +func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) { opts := rest.Opts{ Method: "GET", Path: f.filePath(remote), @@ -1053,7 +1054,7 @@ func (f *Fs) PublicLink(remote 
string) (link string, err error) { } // About gets quota information -func (f *Fs) About() (*fs.Usage, error) { +func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { info, err := getAccountInfo(f.srv, f.user) if err != nil { return nil, err @@ -1095,7 +1096,7 @@ func (o *Object) Remote() string { } // Hash returns the MD5 of an object returning a lowercase hex string -func (o *Object) Hash(t hash.Type) (string, error) { +func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { if t != hash.MD5 { return "", hash.ErrUnsupported } @@ -1113,7 +1114,7 @@ func (o *Object) Size() int64 { } // MimeType of an Object if known, "" otherwise -func (o *Object) MimeType() string { +func (o *Object) MimeType(ctx context.Context) string { return o.mimeType } @@ -1145,7 +1146,7 @@ func (o *Object) readMetaData(force bool) (err error) { // // It attempts to read the objects mtime and if that isn't present the // LastModified returned in the http headers -func (o *Object) ModTime() time.Time { +func (o *Object) ModTime(ctx context.Context) time.Time { err := o.readMetaData(false) if err != nil { fs.Logf(o, "Failed to read metadata: %v", err) @@ -1155,7 +1156,7 @@ func (o *Object) ModTime() time.Time { } // SetModTime sets the modification time of the local fs object -func (o *Object) SetModTime(modTime time.Time) error { +func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { return fs.ErrorCantSetModTime } @@ -1165,7 +1166,7 @@ func (o *Object) Storable() bool { } // Open an object for read -func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { +func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { fs.FixRangeOption(options, o.size) var resp *http.Response opts := rest.Opts{ @@ -1249,9 +1250,9 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader, // If existing is set then it updates the object rather than creating a new one // // The 
new object may have been created if an error is returned -func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { +func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { size := src.Size() - md5String, err := src.Hash(hash.MD5) + md5String, err := src.Hash(ctx, hash.MD5) if err != nil || md5String == "" { // unwrap the accounting from the input, we use wrap to put it // back on after the buffering @@ -1274,7 +1275,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio Path: "allocate", ExtraHeaders: make(map[string]string), } - fileDate := api.Time(src.ModTime()).APIString() + fileDate := api.Time(src.ModTime(ctx)).APIString() // the allocate request var request = api.AllocateFileRequest{ @@ -1338,7 +1339,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio } // Remove an object -func (o *Object) Remove() error { +func (o *Object) Remove(ctx context.Context) error { opts := rest.Opts{ Method: "POST", Path: o.filePath(), diff --git a/backend/koofr/koofr.go b/backend/koofr/koofr.go index cc2221bf6..10488b426 100644 --- a/backend/koofr/koofr.go +++ b/backend/koofr/koofr.go @@ -1,6 +1,7 @@ package koofr import ( + "context" "encoding/base64" "errors" "fmt" @@ -105,7 +106,7 @@ func (o *Object) Remote() string { } // ModTime returns the modification time of the Object -func (o *Object) ModTime() time.Time { +func (o *Object) ModTime(ctx context.Context) time.Time { return time.Unix(o.info.Modified/1000, (o.info.Modified%1000)*1000*1000) } @@ -120,7 +121,7 @@ func (o *Object) Fs() fs.Info { } // Hash returns an MD5 hash of the Object -func (o *Object) Hash(typ hash.Type) (string, error) { +func (o *Object) Hash(ctx context.Context, typ hash.Type) (string, error) { if typ == hash.MD5 { return o.info.Hash, nil } @@ -138,12 +139,12 @@ func (o *Object) Storable() bool { } // SetModTime is not supported -func (o 
*Object) SetModTime(mtime time.Time) error { +func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error { return nil } // Open opens the Object for reading -func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) { +func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) { var sOff, eOff int64 = 0, -1 for _, option := range options { @@ -177,7 +178,7 @@ func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) { } // Update updates the Object contents -func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { +func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { putopts := &koofrclient.PutFilter{ ForceOverwrite: true, NoRename: true, @@ -199,7 +200,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio } // Remove deletes the remote Object -func (o *Object) Remove() error { +func (o *Object) Remove(ctx context.Context) error { return o.fs.client.FilesDelete(o.fs.mountID, o.fullPath()) } @@ -297,7 +298,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) { } // List returns a list of items in a directory -func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { +func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { files, err := f.client.FilesList(f.mountID, f.fullPath(dir)) if err != nil { return nil, translateErrorsDir(err) @@ -318,7 +319,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { } // NewObject creates a new remote Object for a given remote path -func (f *Fs) NewObject(remote string) (obj fs.Object, err error) { +func (f *Fs) NewObject(ctx context.Context, remote string) (obj fs.Object, err error) { info, err := f.client.FilesInfo(f.mountID, f.fullPath(remote)) if err != nil { return nil, translateErrorsObject(err) @@ -334,7 +335,7 @@ func (f *Fs) NewObject(remote 
string) (obj fs.Object, err error) { } // Put updates a remote Object -func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (obj fs.Object, err error) { +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (obj fs.Object, err error) { putopts := &koofrclient.PutFilter{ ForceOverwrite: true, NoRename: true, @@ -359,8 +360,8 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (obj } // PutStream updates a remote Object with a stream of unknown size -func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { - return f.Put(in, src, options...) +func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + return f.Put(ctx, in, src, options...) } // isBadRequest is a predicate which holds true iff the error returned was @@ -436,13 +437,13 @@ func (f *Fs) mkdir(fullPath string) error { // Mkdir creates a directory at the given remote path. 
Creates ancestors if // necessary -func (f *Fs) Mkdir(dir string) error { +func (f *Fs) Mkdir(ctx context.Context, dir string) error { fullPath := f.fullPath(dir) return f.mkdir(fullPath) } // Rmdir removes an (empty) directory at the given remote path -func (f *Fs) Rmdir(dir string) error { +func (f *Fs) Rmdir(ctx context.Context, dir string) error { files, err := f.client.FilesList(f.mountID, f.fullPath(dir)) if err != nil { return translateErrorsDir(err) @@ -458,7 +459,7 @@ func (f *Fs) Rmdir(dir string) error { } // Copy copies a remote Object to the given path -func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { +func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { dstFullPath := f.fullPath(remote) dstDir := dir(dstFullPath) err := f.mkdir(dstDir) @@ -471,11 +472,11 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { if err != nil { return nil, fs.ErrorCantCopy } - return f.NewObject(remote) + return f.NewObject(ctx, remote) } // Move moves a remote Object to the given path -func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { +func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { srcObj := src.(*Object) dstFullPath := f.fullPath(remote) dstDir := dir(dstFullPath) @@ -488,11 +489,11 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { if err != nil { return nil, fs.ErrorCantMove } - return f.NewObject(remote) + return f.NewObject(ctx, remote) } // DirMove moves a remote directory to the given path -func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { +func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { srcFs := src.(*Fs) srcFullPath := srcFs.fullPath(srcRemote) dstFullPath := f.fullPath(dstRemote) @@ -512,7 +513,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { } // About reports space usage (with a MB precision) -func (f *Fs) About() 
(*fs.Usage, error) { +func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { mount, err := f.client.MountsDetails(f.mountID) if err != nil { return nil, err @@ -528,7 +529,7 @@ func (f *Fs) About() (*fs.Usage, error) { } // Purge purges the complete Fs -func (f *Fs) Purge() error { +func (f *Fs) Purge(ctx context.Context) error { err := translateErrorsDir(f.client.FilesDelete(f.mountID, f.fullPath(""))) return err } @@ -580,7 +581,7 @@ func createLink(c *koofrclient.KoofrClient, mountID string, path string) (*link, } // PublicLink creates a public link to the remote path -func (f *Fs) PublicLink(remote string) (string, error) { +func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) { linkData, err := createLink(f.client, f.mountID, f.fullPath(remote)) if err != nil { return "", translateErrorsDir(err) diff --git a/backend/local/about_unix.go b/backend/local/about_unix.go index 5db54f6de..93a4394c6 100644 --- a/backend/local/about_unix.go +++ b/backend/local/about_unix.go @@ -3,6 +3,7 @@ package local import ( + "context" "syscall" "github.com/ncw/rclone/fs" @@ -10,7 +11,7 @@ import ( ) // About gets quota information -func (f *Fs) About() (*fs.Usage, error) { +func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { var s syscall.Statfs_t err := syscall.Statfs(f.root, &s) if err != nil { diff --git a/backend/local/about_windows.go b/backend/local/about_windows.go index 4c9dcec3b..720885d76 100644 --- a/backend/local/about_windows.go +++ b/backend/local/about_windows.go @@ -3,6 +3,7 @@ package local import ( + "context" "syscall" "unsafe" @@ -13,7 +14,7 @@ import ( var getFreeDiskSpace = syscall.NewLazyDLL("kernel32.dll").NewProc("GetDiskFreeSpaceExW") // About gets quota information -func (f *Fs) About() (*fs.Usage, error) { +func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { var available, total, free int64 _, _, e1 := getFreeDiskSpace.Call( uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(f.root))), diff --git 
a/backend/local/local.go b/backend/local/local.go index 760c3be52..e0e3925dd 100644 --- a/backend/local/local.go +++ b/backend/local/local.go @@ -3,6 +3,7 @@ package local import ( "bytes" + "context" "fmt" "io" "io/ioutil" @@ -329,7 +330,7 @@ func (f *Fs) newObjectWithInfo(remote, dstPath string, info os.FileInfo) (fs.Obj // NewObject finds the Object at remote. If it can't be found // it returns the error ErrorObjectNotFound. -func (f *Fs) NewObject(remote string) (fs.Object, error) { +func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { return f.newObjectWithInfo(remote, "", nil) } @@ -342,7 +343,7 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) { // // This should return ErrDirNotFound if the directory isn't // found. -func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { +func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { dir = f.dirNames.Load(dir) fsDirPath := f.cleanPath(filepath.Join(f.root, dir)) @@ -507,11 +508,11 @@ func (m *mapper) Save(in, out string) string { } // Put the Object to the local filesystem -func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { remote := src.Remote() // Temporary Object under construction - info filled in by Update() o := f.newObject(remote, "") - err := o.Update(in, src, options...) + err := o.Update(ctx, in, src, options...) if err != nil { return nil, err } @@ -519,12 +520,12 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs. } // PutStream uploads to the remote path with the modTime given of indeterminate size -func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { - return f.Put(in, src, options...) 
+func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + return f.Put(ctx, in, src, options...) } // Mkdir creates the directory if it doesn't exist -func (f *Fs) Mkdir(dir string) error { +func (f *Fs) Mkdir(ctx context.Context, dir string) error { // FIXME: https://github.com/syncthing/syncthing/blob/master/lib/osutil/mkdirall_windows.go root := f.cleanPath(filepath.Join(f.root, dir)) err := os.MkdirAll(root, 0777) @@ -544,7 +545,7 @@ func (f *Fs) Mkdir(dir string) error { // Rmdir removes the directory // // If it isn't empty it will return an error -func (f *Fs) Rmdir(dir string) error { +func (f *Fs) Rmdir(ctx context.Context, dir string) error { root := f.cleanPath(filepath.Join(f.root, dir)) return os.Remove(root) } @@ -600,7 +601,7 @@ func (f *Fs) readPrecision() (precision time.Duration) { } // If it matches - have found the precision - // fmt.Println("compare", fi.ModTime(), t) + // fmt.Println("compare", fi.ModTime(ctx), t) if fi.ModTime().Equal(t) { // fmt.Println("Precision detected as", duration) return duration @@ -614,7 +615,7 @@ func (f *Fs) readPrecision() (precision time.Duration) { // Optional interface: Only implement this if you have a way of // deleting all the files quicker than just running Remove() on the // result of List() -func (f *Fs) Purge() error { +func (f *Fs) Purge(ctx context.Context) error { fi, err := f.lstat(f.root) if err != nil { return err @@ -634,7 +635,7 @@ func (f *Fs) Purge() error { // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantMove -func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { +func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't move - not same remote type") @@ -693,7 +694,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { // If it isn't possible 
then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists -func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { +func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { srcFs, ok := src.(*Fs) if !ok { fs.Debugf(srcFs, "Can't move directory - not same remote type") @@ -758,7 +759,7 @@ func (o *Object) Remote() string { } // Hash returns the requested hash of a file as a lowercase hex string -func (o *Object) Hash(r hash.Type) (string, error) { +func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) { // Check that the underlying file hasn't changed oldtime := o.modTime oldsize := o.size @@ -809,12 +810,12 @@ func (o *Object) Size() int64 { } // ModTime returns the modification time of the object -func (o *Object) ModTime() time.Time { +func (o *Object) ModTime(ctx context.Context) time.Time { return o.modTime } // SetModTime sets the modification time of the local fs object -func (o *Object) SetModTime(modTime time.Time) error { +func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { var err error if o.translatedLink { err = lChtimes(o.path, modTime, modTime) @@ -910,7 +911,7 @@ func (o *Object) openTranslatedLink(offset, limit int64) (lrc io.ReadCloser, err } // Open an object for read -func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { +func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { var offset, limit int64 = 0, -1 hashes := hash.Supported for _, option := range options { @@ -974,7 +975,7 @@ func (nwc nopWriterCloser) Close() error { } // Update the object from in with modTime and size -func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { +func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { var out io.WriteCloser hashes := hash.Supported @@ -1055,7 +1056,7 @@ func (o 
*Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio o.fs.objectHashesMu.Unlock() // Set the mtime - err = o.SetModTime(src.ModTime()) + err = o.SetModTime(ctx, src.ModTime(ctx)) if err != nil { return err } @@ -1069,7 +1070,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio // Pass in the remote desired and the size if known. // // It truncates any existing object -func (f *Fs) OpenWriterAt(remote string, size int64) (fs.WriterAtCloser, error) { +func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) { // Temporary Object under construction o := f.newObject(remote, "") @@ -1119,7 +1120,7 @@ func (o *Object) lstat() error { } // Remove an object -func (o *Object) Remove() error { +func (o *Object) Remove(ctx context.Context) error { return remove(o.path) } diff --git a/backend/local/local_internal_test.go b/backend/local/local_internal_test.go index 847f199c5..a78289ec7 100644 --- a/backend/local/local_internal_test.go +++ b/backend/local/local_internal_test.go @@ -1,6 +1,7 @@ package local import ( + "context" "io/ioutil" "os" "path" @@ -83,6 +84,7 @@ func TestUpdatingCheck(t *testing.T) { } func TestSymlink(t *testing.T) { + ctx := context.Background() r := fstest.NewRun(t) defer r.Finalise() f := r.Flocal.(*Fs) @@ -131,7 +133,7 @@ func TestSymlink(t *testing.T) { // Create a symlink modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z") - file3 := r.WriteObjectTo(r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false) + file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false) if runtime.GOOS == "windows" { file3.Size = 0 // symlinks are 0 length under Windows } @@ -150,7 +152,7 @@ func TestSymlink(t *testing.T) { assert.Equal(t, "file.txt", linkText) // Check that NewObject gets the correct object - o, err := r.Flocal.NewObject("symlink2.txt" + linkSuffix) + o, err := r.Flocal.NewObject(ctx, 
"symlink2.txt"+linkSuffix) require.NoError(t, err) assert.Equal(t, "symlink2.txt"+linkSuffix, o.Remote()) if runtime.GOOS != "windows" { @@ -158,11 +160,11 @@ func TestSymlink(t *testing.T) { } // Check that NewObject doesn't see the non suffixed version - _, err = r.Flocal.NewObject("symlink2.txt") + _, err = r.Flocal.NewObject(ctx, "symlink2.txt") require.Equal(t, fs.ErrorObjectNotFound, err) // Check reading the object - in, err := o.Open() + in, err := o.Open(ctx) require.NoError(t, err) contents, err := ioutil.ReadAll(in) require.NoError(t, err) @@ -170,7 +172,7 @@ func TestSymlink(t *testing.T) { require.NoError(t, in.Close()) // Check reading the object with range - in, err = o.Open(&fs.RangeOption{Start: 2, End: 5}) + in, err = o.Open(ctx, &fs.RangeOption{Start: 2, End: 5}) require.NoError(t, err) contents, err = ioutil.ReadAll(in) require.NoError(t, err) diff --git a/backend/mega/mega.go b/backend/mega/mega.go index e191baf65..0fb960c80 100644 --- a/backend/mega/mega.go +++ b/backend/mega/mega.go @@ -16,6 +16,7 @@ Improvements: */ import ( + "context" "fmt" "io" "path" @@ -403,10 +404,10 @@ func (f *Fs) clearRoot() { } // CleanUp deletes all files currently in trash -func (f *Fs) CleanUp() (err error) { +func (f *Fs) CleanUp(ctx context.Context) (err error) { trash := f.srv.FS.GetTrash() items := []*mega.Node{} - _, err = f.list(trash, func(item *mega.Node) bool { + _, err = f.list(ctx, trash, func(item *mega.Node) bool { items = append(items, item) return false }) @@ -454,7 +455,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *mega.Node) (fs.Object, error // NewObject finds the Object at remote. If it can't be found // it returns the error fs.ErrorObjectNotFound. 
-func (f *Fs) NewObject(remote string) (fs.Object, error) { +func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { return f.newObjectWithInfo(remote, nil) } @@ -469,7 +470,7 @@ type listFn func(*mega.Node) bool // Lists the directory required calling the user function on each item found // // If the user fn ever returns true then it early exits with found = true -func (f *Fs) list(dir *mega.Node, fn listFn) (found bool, err error) { +func (f *Fs) list(ctx context.Context, dir *mega.Node, fn listFn) (found bool, err error) { nodes, err := f.srv.FS.GetChildren(dir) if err != nil { return false, errors.Wrapf(err, "list failed") @@ -492,13 +493,13 @@ func (f *Fs) list(dir *mega.Node, fn listFn) (found bool, err error) { // // This should return ErrDirNotFound if the directory isn't // found. -func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { +func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { dirNode, err := f.lookupDir(dir) if err != nil { return nil, err } var iErr error - _, err = f.list(dirNode, func(info *mega.Node) bool { + _, err = f.list(ctx, dirNode, func(info *mega.Node) bool { remote := path.Join(dir, info.GetName()) switch info.GetType() { case mega.FOLDER, mega.ROOT, mega.INBOX, mega.TRASH: @@ -551,14 +552,14 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje // // This will create a duplicate if we upload a new file without // checking to see if there is one already - use Put() for that. -func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { existingObj, err := f.newObjectWithInfo(src.Remote(), nil) switch err { case nil: - return existingObj, existingObj.Update(in, src, options...) + return existingObj, existingObj.Update(ctx, in, src, options...) 
case fs.ErrorObjectNotFound: // Not found so create it - return f.PutUnchecked(in, src) + return f.PutUnchecked(ctx, in, src) default: return nil, err } @@ -573,20 +574,20 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs. // // This will create a duplicate if we upload a new file without // checking to see if there is one already - use Put() for that. -func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { +func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { remote := src.Remote() size := src.Size() - modTime := src.ModTime() + modTime := src.ModTime(ctx) o, _, _, err := f.createObject(remote, modTime, size) if err != nil { return nil, err } - return o, o.Update(in, src, options...) + return o, o.Update(ctx, in, src, options...) } // Mkdir creates the directory if it doesn't exist -func (f *Fs) Mkdir(dir string) error { +func (f *Fs) Mkdir(ctx context.Context, dir string) error { rootNode, err := f.findRoot(true) if err != nil { return err @@ -648,7 +649,7 @@ func (f *Fs) purgeCheck(dir string, check bool) error { // Rmdir deletes the root folder // // Returns an error if it isn't empty -func (f *Fs) Rmdir(dir string) error { +func (f *Fs) Rmdir(ctx context.Context, dir string) error { return f.purgeCheck(dir, true) } @@ -662,7 +663,7 @@ func (f *Fs) Precision() time.Duration { // Optional interface: Only implement this if you have a way of // deleting all the files quicker than just running Remove() on the // result of List() -func (f *Fs) Purge() error { +func (f *Fs) Purge(ctx context.Context) error { return f.purgeCheck("", false) } @@ -743,7 +744,7 @@ func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantMove -func (f *Fs) Move(src fs.Object, remote string) (fs.Object, 
error) { +func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { dstFs := f //log.Printf("Move %q -> %q", src.Remote(), remote) @@ -776,7 +777,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { // If it isn't possible then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists -func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { +func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { dstFs := f srcFs, ok := src.(*Fs) if !ok { @@ -824,7 +825,7 @@ func (f *Fs) Hashes() hash.Set { } // PublicLink generates a public link to the remote path (usually readable by anyone) -func (f *Fs) PublicLink(remote string) (link string, err error) { +func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) { root, err := f.findRoot(false) if err != nil { return "", errors.Wrap(err, "PublicLink failed to find root node") @@ -842,7 +843,7 @@ func (f *Fs) PublicLink(remote string) (link string, err error) { // MergeDirs merges the contents of all the directories passed // in into the first one and rmdirs the other directories. 
-func (f *Fs) MergeDirs(dirs []fs.Directory) error { +func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error { if len(dirs) < 2 { return nil } @@ -861,7 +862,7 @@ func (f *Fs) MergeDirs(dirs []fs.Directory) error { // list the the objects infos := []*mega.Node{} - _, err := f.list(srcDirNode, func(info *mega.Node) bool { + _, err := f.list(ctx, srcDirNode, func(info *mega.Node) bool { infos = append(infos, info) return false }) @@ -890,7 +891,7 @@ func (f *Fs) MergeDirs(dirs []fs.Directory) error { } // About gets quota information -func (f *Fs) About() (*fs.Usage, error) { +func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { var q mega.QuotaResp var err error err = f.pacer.Call(func() (bool, error) { @@ -929,7 +930,7 @@ func (o *Object) Remote() string { } // Hash returns the hashes of an object -func (o *Object) Hash(t hash.Type) (string, error) { +func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { return "", hash.ErrUnsupported } @@ -969,12 +970,12 @@ func (o *Object) readMetaData() (err error) { // // It attempts to read the objects mtime and if that isn't present the // LastModified returned in the http headers -func (o *Object) ModTime() time.Time { +func (o *Object) ModTime(ctx context.Context) time.Time { return o.info.GetTimeStamp() } // SetModTime sets the modification time of the local fs object -func (o *Object) SetModTime(modTime time.Time) error { +func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { return fs.ErrorCantSetModTime } @@ -1065,7 +1066,7 @@ func (oo *openObject) Close() (err error) { } // Open an object for read -func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { +func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { var offset, limit int64 = 0, -1 for _, option := range options { switch x := option.(type) { @@ -1103,12 +1104,12 @@ func (o *Object) Open(options ...fs.OpenOption) (in 
io.ReadCloser, err error) { // If existing is set then it updates the object rather than creating a new one // // The new object may have been created if an error is returned -func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { +func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { size := src.Size() if size < 0 { return errors.New("mega backend can't upload a file of unknown length") } - //modTime := src.ModTime() + //modTime := src.ModTime(ctx) remote := o.Remote() // Create the parent directory @@ -1171,7 +1172,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio } // Remove an object -func (o *Object) Remove() error { +func (o *Object) Remove(ctx context.Context) error { err := o.fs.deleteNode(o.info) if err != nil { return errors.Wrap(err, "Remove object failed") diff --git a/backend/onedrive/onedrive.go b/backend/onedrive/onedrive.go index 6b8c9e053..d28bc3f46 100644 --- a/backend/onedrive/onedrive.go +++ b/backend/onedrive/onedrive.go @@ -3,6 +3,7 @@ package onedrive import ( + "context" "encoding/base64" "encoding/hex" "encoding/json" @@ -353,7 +354,7 @@ func (f *Fs) readMetaDataForPathRelativeToID(normalizedID string, relPath string } // readMetaDataForPath reads the metadata from the path (relative to the absolute root) -func (f *Fs) readMetaDataForPath(path string) (info *api.Item, resp *http.Response, err error) { +func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, resp *http.Response, err error) { firstSlashIndex := strings.IndexRune(path, '/') if f.driveType != driveTypePersonal || firstSlashIndex == -1 { @@ -406,7 +407,7 @@ func (f *Fs) readMetaDataForPath(path string) (info *api.Item, resp *http.Respon if !insideRoot || !dirCacheFoundRoot { // We do not have the normalized ID in dirCache for our query to base on. Query it manually. 
firstDir, relPath = path[:firstSlashIndex], path[firstSlashIndex+1:] - info, resp, err := f.readMetaDataForPath(firstDir) + info, resp, err := f.readMetaDataForPath(ctx, firstDir) if err != nil { return info, resp, err } @@ -418,7 +419,7 @@ func (f *Fs) readMetaDataForPath(path string) (info *api.Item, resp *http.Respon } else { // Read metadata based on firstDir firstDir, relPath = path[:firstSlashIndex], path[firstSlashIndex+1:] - baseNormalizedID, err = f.dirCache.FindDir(firstDir, false) + baseNormalizedID, err = f.dirCache.FindDir(ctx, firstDir, false) if err != nil { return nil, nil, err } @@ -463,6 +464,7 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) // NewFs constructs an Fs from the path, container:path func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { + ctx := context.Background() // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) @@ -503,12 +505,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { // Renew the token in the background f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error { - _, _, err := f.readMetaDataForPath("") + _, _, err := f.readMetaDataForPath(ctx, "") return err }) // Get rootID - rootInfo, _, err := f.readMetaDataForPath("") + rootInfo, _, err := f.readMetaDataForPath(ctx, "") if err != nil || rootInfo.GetID() == "" { return nil, errors.Wrap(err, "failed to get root") } @@ -516,7 +518,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { f.dirCache = dircache.New(root, rootInfo.GetID(), f) // Find the current root - err = f.dirCache.FindRoot(false) + err = f.dirCache.FindRoot(ctx, false) if err != nil { // Assume it is a file newRoot, remote := dircache.SplitPath(root) @@ -524,12 +526,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { tempF.dirCache = dircache.New(newRoot, rootInfo.ID, &tempF) tempF.root = newRoot // Make new Fs which is the parent - err = 
tempF.dirCache.FindRoot(false) + err = tempF.dirCache.FindRoot(ctx, false) if err != nil { // No root so return old f return f, nil } - _, err := tempF.newObjectWithInfo(remote, nil) + _, err := tempF.newObjectWithInfo(ctx, remote, nil) if err != nil { if err == fs.ErrorObjectNotFound { // File doesn't exist so return old f @@ -559,7 +561,7 @@ func (f *Fs) rootSlash() string { // Return an Object from a path // // If it can't be found it returns the error fs.ErrorObjectNotFound. -func (f *Fs) newObjectWithInfo(remote string, info *api.Item) (fs.Object, error) { +func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Item) (fs.Object, error) { o := &Object{ fs: f, remote: remote, @@ -569,7 +571,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.Item) (fs.Object, error) // Set info err = o.setMetaData(info) } else { - err = o.readMetaData() // reads info and meta, returning an error + err = o.readMetaData(ctx) // reads info and meta, returning an error } if err != nil { return nil, err @@ -579,12 +581,12 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.Item) (fs.Object, error) // NewObject finds the Object at remote. If it can't be found // it returns the error fs.ErrorObjectNotFound. 
-func (f *Fs) NewObject(remote string) (fs.Object, error) { - return f.newObjectWithInfo(remote, nil) +func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { + return f.newObjectWithInfo(ctx, remote, nil) } // FindLeaf finds a directory of name leaf in the folder with ID pathID -func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) { +func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) { // fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf) _, ok := f.dirCache.GetInv(pathID) if !ok { @@ -607,7 +609,7 @@ func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err er } // CreateDir makes a directory with pathID as parent and name leaf -func (f *Fs) CreateDir(dirID, leaf string) (newID string, err error) { +func (f *Fs) CreateDir(ctx context.Context, dirID, leaf string) (newID string, err error) { // fs.Debugf(f, "CreateDir(%q, %q)\n", dirID, leaf) var resp *http.Response var info *api.Item @@ -697,12 +699,12 @@ OUTER: // // This should return ErrDirNotFound if the directory isn't // found. 
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { - err = f.dirCache.FindRoot(false) +func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { + err = f.dirCache.FindRoot(ctx, false) if err != nil { return nil, err } - directoryID, err := f.dirCache.FindDir(dir, false) + directoryID, err := f.dirCache.FindDir(ctx, dir, false) if err != nil { return nil, err } @@ -723,7 +725,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { d.SetItems(folder.ChildCount) entries = append(entries, d) } else { - o, err := f.newObjectWithInfo(remote, info) + o, err := f.newObjectWithInfo(ctx, remote, info) if err != nil { iErr = err return true @@ -747,9 +749,9 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { // Returns the object, leaf, directoryID and error // // Used to create new objects -func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) { +func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) { // Create the directory for the object if it doesn't exist - leaf, directoryID, err = f.dirCache.FindRootAndPath(remote, true) + leaf, directoryID, err = f.dirCache.FindRootAndPath(ctx, remote, true) if err != nil { return nil, leaf, directoryID, err } @@ -766,26 +768,26 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje // Copy the reader in to the new object which is returned // // The new object may have been created if an error is returned -func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { remote := src.Remote() size := src.Size() - modTime := src.ModTime() + modTime := src.ModTime(ctx) - o, _, _, err := f.createObject(remote, 
modTime, size) + o, _, _, err := f.createObject(ctx, remote, modTime, size) if err != nil { return nil, err } - return o, o.Update(in, src, options...) + return o, o.Update(ctx, in, src, options...) } // Mkdir creates the container if it doesn't exist -func (f *Fs) Mkdir(dir string) error { - err := f.dirCache.FindRoot(true) +func (f *Fs) Mkdir(ctx context.Context, dir string) error { + err := f.dirCache.FindRoot(ctx, true) if err != nil { return err } if dir != "" { - _, err = f.dirCache.FindDir(dir, true) + _, err = f.dirCache.FindDir(ctx, dir, true) } return err } @@ -803,17 +805,17 @@ func (f *Fs) deleteObject(id string) error { // purgeCheck removes the root directory, if check is set then it // refuses to do so if it has anything in -func (f *Fs) purgeCheck(dir string, check bool) error { +func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error { root := path.Join(f.root, dir) if root == "" { return errors.New("can't purge root directory") } dc := f.dirCache - err := dc.FindRoot(false) + err := dc.FindRoot(ctx, false) if err != nil { return err } - rootID, err := dc.FindDir(dir, false) + rootID, err := dc.FindDir(ctx, dir, false) if err != nil { return err } @@ -840,8 +842,8 @@ func (f *Fs) purgeCheck(dir string, check bool) error { // Rmdir deletes the root folder // // Returns an error if it isn't empty -func (f *Fs) Rmdir(dir string) error { - return f.purgeCheck(dir, true) +func (f *Fs) Rmdir(ctx context.Context, dir string) error { + return f.purgeCheck(ctx, dir, true) } // Precision return the precision of this Fs @@ -850,7 +852,7 @@ func (f *Fs) Precision() time.Duration { } // waitForJob waits for the job with status in url to complete -func (f *Fs) waitForJob(location string, o *Object) error { +func (f *Fs) waitForJob(ctx context.Context, location string, o *Object) error { deadline := time.Now().Add(fs.Config.Timeout) for time.Now().Before(deadline) { var resp *http.Response @@ -881,7 +883,7 @@ func (f *Fs) waitForJob(location 
string, o *Object) error { return errors.Errorf("%s: async operation returned %q", o.remote, status.Status) } case "completed": - err = o.readMetaData() + err = o.readMetaData(ctx) return errors.Wrapf(err, "async operation completed but readMetaData failed") } @@ -899,13 +901,13 @@ func (f *Fs) waitForJob(location string, o *Object) error { // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy -func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { +func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't copy - not same remote type") return nil, fs.ErrorCantCopy } - err := srcObj.readMetaData() + err := srcObj.readMetaData(ctx) if err != nil { return nil, err } @@ -917,7 +919,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { } // Create temporary object - dstObj, leaf, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size) + dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size) if err != nil { return nil, err } @@ -953,7 +955,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { } // Wait for job to finish - err = f.waitForJob(location, dstObj) + err = f.waitForJob(ctx, location, dstObj) if err != nil { return nil, err } @@ -961,7 +963,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { // Copy does NOT copy the modTime from the source and there seems to // be no way to set date before // This will create TWO versions on OneDrive - err = dstObj.SetModTime(srcObj.ModTime()) + err = dstObj.SetModTime(ctx, srcObj.ModTime(ctx)) if err != nil { return nil, err } @@ -974,8 +976,8 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { // Optional interface: Only implement this if you have a way of // deleting all the files quicker than just running Remove() on the // result of 
List() -func (f *Fs) Purge() error { - return f.purgeCheck("", false) +func (f *Fs) Purge(ctx context.Context) error { + return f.purgeCheck(ctx, "", false) } // Move src to this remote using server side move operations. @@ -987,7 +989,7 @@ func (f *Fs) Purge() error { // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantMove -func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { +func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't move - not same remote type") @@ -995,7 +997,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { } // Create temporary object - dstObj, leaf, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size) + dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size) if err != nil { return nil, err } @@ -1049,7 +1051,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { // If it isn't possible then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists -func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { +func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { srcFs, ok := src.(*Fs) if !ok { fs.Debugf(srcFs, "Can't move directory - not same remote type") @@ -1065,14 +1067,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { } // find the root src directory - err := srcFs.dirCache.FindRoot(false) + err := srcFs.dirCache.FindRoot(ctx, false) if err != nil { return err } // find the root dst directory if dstRemote != "" { - err = f.dirCache.FindRoot(true) + err = f.dirCache.FindRoot(ctx, true) if err != nil { return err } @@ -1088,14 +1090,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { if dstRemote == "" { findPath = f.root } - leaf, dstDirectoryID, err = 
f.dirCache.FindPath(findPath, true) + leaf, dstDirectoryID, err = f.dirCache.FindPath(ctx, findPath, true) if err != nil { return err } parsedDstDirID, dstDriveID, _ := parseNormalizedID(dstDirectoryID) // Find ID of src - srcID, err := srcFs.dirCache.FindDir(srcRemote, false) + srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false) if err != nil { return err } @@ -1109,7 +1111,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { // Check destination does not exist if dstRemote != "" { - _, err = f.dirCache.FindDir(dstRemote, false) + _, err = f.dirCache.FindDir(ctx, dstRemote, false) if err == fs.ErrorDirNotFound { // OK } else if err != nil { @@ -1160,7 +1162,7 @@ func (f *Fs) DirCacheFlush() { } // About gets quota information -func (f *Fs) About() (usage *fs.Usage, err error) { +func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) { var drive api.Drive opts := rest.Opts{ Method: "GET", @@ -1193,8 +1195,8 @@ func (f *Fs) Hashes() hash.Set { } // PublicLink returns a link for downloading without accout. 
-func (f *Fs) PublicLink(remote string) (link string, err error) { - info, _, err := f.readMetaDataForPath(f.srvPath(remote)) +func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) { + info, _, err := f.readMetaDataForPath(ctx, f.srvPath(remote)) if err != nil { return "", err } @@ -1249,7 +1251,7 @@ func (o *Object) srvPath() string { } // Hash returns the SHA-1 of an object returning a lowercase hex string -func (o *Object) Hash(t hash.Type) (string, error) { +func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { if o.fs.driveType == driveTypePersonal { if t == hash.SHA1 { return o.sha1, nil @@ -1264,7 +1266,7 @@ func (o *Object) Hash(t hash.Type) (string, error) { // Size returns the size of an object in bytes func (o *Object) Size() int64 { - err := o.readMetaData() + err := o.readMetaData(context.TODO()) if err != nil { fs.Logf(o, "Failed to read metadata: %v", err) return 0 @@ -1313,11 +1315,11 @@ func (o *Object) setMetaData(info *api.Item) (err error) { // readMetaData gets the metadata if it hasn't already been fetched // // it also sets the info -func (o *Object) readMetaData() (err error) { +func (o *Object) readMetaData(ctx context.Context) (err error) { if o.hasMetaData { return nil } - info, _, err := o.fs.readMetaDataForPath(o.srvPath()) + info, _, err := o.fs.readMetaDataForPath(ctx, o.srvPath()) if err != nil { if apiErr, ok := err.(*api.Error); ok { if apiErr.ErrorInfo.Code == "itemNotFound" { @@ -1334,8 +1336,8 @@ func (o *Object) readMetaData() (err error) { // // It attempts to read the objects mtime and if that isn't present the // LastModified returned in the http headers -func (o *Object) ModTime() time.Time { - err := o.readMetaData() +func (o *Object) ModTime(ctx context.Context) time.Time { + err := o.readMetaData(ctx) if err != nil { fs.Logf(o, "Failed to read metadata: %v", err) return time.Now() @@ -1344,9 +1346,9 @@ func (o *Object) ModTime() time.Time { } // setModTime sets the 
modification time of the local fs object -func (o *Object) setModTime(modTime time.Time) (*api.Item, error) { +func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item, error) { var opts rest.Opts - leaf, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false) + leaf, directoryID, _ := o.fs.dirCache.FindPath(ctx, o.remote, false) trueDirID, drive, rootURL := parseNormalizedID(directoryID) if drive != "" { opts = rest.Opts{ @@ -1375,8 +1377,8 @@ func (o *Object) setModTime(modTime time.Time) (*api.Item, error) { } // SetModTime sets the modification time of the local fs object -func (o *Object) SetModTime(modTime time.Time) error { - info, err := o.setModTime(modTime) +func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { + info, err := o.setModTime(ctx, modTime) if err != nil { return err } @@ -1389,7 +1391,7 @@ func (o *Object) Storable() bool { } // Open an object for read -func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { +func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { if o.id == "" { return nil, errors.New("can't download - no id") } @@ -1418,8 +1420,8 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { } // createUploadSession creates an upload session for the object -func (o *Object) createUploadSession(modTime time.Time) (response *api.CreateUploadResponse, err error) { - leaf, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false) +func (o *Object) createUploadSession(ctx context.Context, modTime time.Time) (response *api.CreateUploadResponse, err error) { + leaf, directoryID, _ := o.fs.dirCache.FindPath(ctx, o.remote, false) id, drive, rootURL := parseNormalizedID(directoryID) var opts rest.Opts if drive != "" { @@ -1498,7 +1500,7 @@ func (o *Object) cancelUploadSession(url string) (err error) { } // uploadMultipart uploads a file using multipart upload -func (o *Object) uploadMultipart(in 
io.Reader, size int64, modTime time.Time) (info *api.Item, err error) { +func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) { if size <= 0 { return nil, errors.New("unknown-sized upload not supported") } @@ -1522,7 +1524,7 @@ func (o *Object) uploadMultipart(in io.Reader, size int64, modTime time.Time) (i // Create upload session fs.Debugf(o, "Starting multipart upload") - session, err := o.createUploadSession(modTime) + session, err := o.createUploadSession(ctx, modTime) if err != nil { close(uploadURLChan) atexit.Unregister(cancelFuncHandle) @@ -1562,7 +1564,7 @@ func (o *Object) uploadMultipart(in io.Reader, size int64, modTime time.Time) (i // Update the content of a remote file within 4MB size in one single request // This function will set modtime after uploading, which will create a new version for the remote file -func (o *Object) uploadSinglepart(in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) { +func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) { if size < 0 || size > int64(fs.SizeSuffix(4*1024*1024)) { return nil, errors.New("size passed into uploadSinglepart must be >= 0 and <= 4MiB") } @@ -1570,7 +1572,7 @@ func (o *Object) uploadSinglepart(in io.Reader, size int64, modTime time.Time) ( fs.Debugf(o, "Starting singlepart upload") var resp *http.Response var opts rest.Opts - leaf, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false) + leaf, directoryID, _ := o.fs.dirCache.FindPath(ctx, o.remote, false) trueDirID, drive, rootURL := parseNormalizedID(directoryID) if drive != "" { opts = rest.Opts{ @@ -1608,13 +1610,13 @@ func (o *Object) uploadSinglepart(in io.Reader, size int64, modTime time.Time) ( return nil, err } // Set the mod time now and read metadata - return o.setModTime(modTime) + return o.setModTime(ctx, modTime) } // Update the object with the contents of the 
io.Reader, modTime and size // // The new object may have been created if an error is returned -func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { +func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { if o.hasMetaData && o.isOneNoteFile { return errors.New("can't upload content to a OneNote file") } @@ -1623,13 +1625,13 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio defer o.fs.tokenRenewer.Stop() size := src.Size() - modTime := src.ModTime() + modTime := src.ModTime(ctx) var info *api.Item if size > 0 { - info, err = o.uploadMultipart(in, size, modTime) + info, err = o.uploadMultipart(ctx, in, size, modTime) } else if size == 0 { - info, err = o.uploadSinglepart(in, size, modTime) + info, err = o.uploadSinglepart(ctx, in, size, modTime) } else { return errors.New("unknown-sized upload not supported") } @@ -1641,12 +1643,12 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio } // Remove an object -func (o *Object) Remove() error { +func (o *Object) Remove(ctx context.Context) error { return o.fs.deleteObject(o.id) } // MimeType of an Object if known, "" otherwise -func (o *Object) MimeType() string { +func (o *Object) MimeType(ctx context.Context) string { return o.mimeType } diff --git a/backend/opendrive/opendrive.go b/backend/opendrive/opendrive.go index d65106ca5..6240ecf95 100644 --- a/backend/opendrive/opendrive.go +++ b/backend/opendrive/opendrive.go @@ -2,6 +2,7 @@ package opendrive import ( "bytes" + "context" "fmt" "io" "mime/multipart" @@ -121,6 +122,7 @@ func (f *Fs) DirCacheFlush() { // NewFs constructs an Fs from the path, bucket:path func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { + ctx := context.Background() // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) @@ -175,7 +177,7 @@ func NewFs(name, root string, m 
configmap.Mapper) (fs.Fs, error) { }).Fill(f) // Find the current root - err = f.dirCache.FindRoot(false) + err = f.dirCache.FindRoot(ctx, false) if err != nil { // Assume it is a file newRoot, remote := dircache.SplitPath(root) @@ -184,12 +186,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { tempF.root = newRoot // Make new Fs which is the parent - err = tempF.dirCache.FindRoot(false) + err = tempF.dirCache.FindRoot(ctx, false) if err != nil { // No root so return old f return f, nil } - _, err := tempF.newObjectWithInfo(remote, nil) + _, err := tempF.newObjectWithInfo(ctx, remote, nil) if err != nil { if err == fs.ErrorObjectNotFound { // File doesn't exist so return old f @@ -233,14 +235,14 @@ func errorHandler(resp *http.Response) error { } // Mkdir creates the folder if it doesn't exist -func (f *Fs) Mkdir(dir string) error { +func (f *Fs) Mkdir(ctx context.Context, dir string) error { // fs.Debugf(nil, "Mkdir(\"%s\")", dir) - err := f.dirCache.FindRoot(true) + err := f.dirCache.FindRoot(ctx, true) if err != nil { return err } if dir != "" { - _, err = f.dirCache.FindDir(dir, true) + _, err = f.dirCache.FindDir(ctx, dir, true) } return err } @@ -261,17 +263,17 @@ func (f *Fs) deleteObject(id string) error { // purgeCheck remotes the root directory, if check is set then it // refuses to do so if it has anything in -func (f *Fs) purgeCheck(dir string, check bool) error { +func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error { root := path.Join(f.root, dir) if root == "" { return errors.New("can't purge root directory") } dc := f.dirCache - err := dc.FindRoot(false) + err := dc.FindRoot(ctx, false) if err != nil { return err } - rootID, err := dc.FindDir(dir, false) + rootID, err := dc.FindDir(ctx, dir, false) if err != nil { return err } @@ -293,9 +295,9 @@ func (f *Fs) purgeCheck(dir string, check bool) error { // Rmdir deletes the root folder // // Returns an error if it isn't empty -func (f *Fs) Rmdir(dir string) 
error { +func (f *Fs) Rmdir(ctx context.Context, dir string) error { // fs.Debugf(nil, "Rmdir(\"%s\")", path.Join(f.root, dir)) - return f.purgeCheck(dir, true) + return f.purgeCheck(ctx, dir, true) } // Precision of the remote @@ -312,14 +314,14 @@ func (f *Fs) Precision() time.Duration { // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy -func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { +func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { // fs.Debugf(nil, "Copy(%v)", remote) srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't copy - not same remote type") return nil, fs.ErrorCantCopy } - err := srcObj.readMetaData() + err := srcObj.readMetaData(ctx) if err != nil { return nil, err } @@ -331,7 +333,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { } // Create temporary object - dstObj, leaf, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size) + dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size) if err != nil { return nil, err } @@ -376,20 +378,20 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantMove -func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { +func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { // fs.Debugf(nil, "Move(%v)", remote) srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't move - not same remote type") return nil, fs.ErrorCantCopy } - err := srcObj.readMetaData() + err := srcObj.readMetaData(ctx) if err != nil { return nil, err } // Create temporary object - dstObj, leaf, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size) + dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size) if err != nil { 
return nil, err } @@ -432,7 +434,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { // If it isn't possible then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists -func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) { +func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) { srcFs, ok := src.(*Fs) if !ok { fs.Debugf(srcFs, "Can't move directory - not same remote type") @@ -448,14 +450,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) { } // find the root src directory - err = srcFs.dirCache.FindRoot(false) + err = srcFs.dirCache.FindRoot(ctx, false) if err != nil { return err } // find the root dst directory if dstRemote != "" { - err = f.dirCache.FindRoot(true) + err = f.dirCache.FindRoot(ctx, true) if err != nil { return err } @@ -471,14 +473,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) { if dstRemote == "" { findPath = f.root } - leaf, directoryID, err = f.dirCache.FindPath(findPath, true) + leaf, directoryID, err = f.dirCache.FindPath(ctx, findPath, true) if err != nil { return err } // Check destination does not exist if dstRemote != "" { - _, err = f.dirCache.FindDir(dstRemote, false) + _, err = f.dirCache.FindDir(ctx, dstRemote, false) if err == fs.ErrorDirNotFound { // OK } else if err != nil { @@ -489,7 +491,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) { } // Find ID of src - srcID, err := srcFs.dirCache.FindDir(srcRemote, false) + srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false) if err != nil { return err } @@ -526,14 +528,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) { // Optional interface: Only implement this if you have a way of // deleting all the files quicker than just running Remove() on the // result of List() -func (f *Fs) Purge() error { - return f.purgeCheck("", false) +func (f *Fs) 
Purge(ctx context.Context) error { + return f.purgeCheck(ctx, "", false) } // Return an Object from a path // // If it can't be found it returns the error fs.ErrorObjectNotFound. -func (f *Fs) newObjectWithInfo(remote string, file *File) (fs.Object, error) { +func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, file *File) (fs.Object, error) { // fs.Debugf(nil, "newObjectWithInfo(%s, %v)", remote, file) var o *Object @@ -552,7 +554,7 @@ func (f *Fs) newObjectWithInfo(remote string, file *File) (fs.Object, error) { remote: remote, } - err := o.readMetaData() + err := o.readMetaData(ctx) if err != nil { return nil, err } @@ -562,9 +564,9 @@ func (f *Fs) newObjectWithInfo(remote string, file *File) (fs.Object, error) { // NewObject finds the Object at remote. If it can't be found // it returns the error fs.ErrorObjectNotFound. -func (f *Fs) NewObject(remote string) (fs.Object, error) { +func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { // fs.Debugf(nil, "NewObject(\"%s\")", remote) - return f.newObjectWithInfo(remote, nil) + return f.newObjectWithInfo(ctx, remote, nil) } // Creates from the parameters passed in a half finished Object which @@ -573,9 +575,9 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) { // Returns the object, leaf, directoryID and error // // Used to create new objects -func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) { +func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) { // Create the directory for the object if it doesn't exist - leaf, directoryID, err = f.dirCache.FindRootAndPath(remote, true) + leaf, directoryID, err = f.dirCache.FindRootAndPath(ctx, remote, true) if err != nil { return nil, leaf, directoryID, err } @@ -613,14 +615,14 @@ func (f *Fs) readMetaDataForFolderID(id string) (info *FolderList, err error) { 
// Copy the reader in to the new object which is returned // // The new object may have been created if an error is returned -func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { remote := src.Remote() size := src.Size() - modTime := src.ModTime() + modTime := src.ModTime(ctx) // fs.Debugf(nil, "Put(%s)", remote) - o, leaf, directoryID, err := f.createObject(remote, modTime, size) + o, leaf, directoryID, err := f.createObject(ctx, remote, modTime, size) if err != nil { return nil, err } @@ -628,7 +630,7 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs. if "" == o.id { // Attempt to read ID, ignore error // FIXME is this correct? - _ = o.readMetaData() + _ = o.readMetaData(ctx) } if "" == o.id { @@ -651,7 +653,7 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs. o.id = response.FileID } - return o, o.Update(in, src, options...) + return o, o.Update(ctx, in, src, options...) 
} // retryErrorCodes is a slice of error codes that we will retry @@ -676,7 +678,7 @@ func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) { // DirCacher methods // CreateDir makes a directory with pathID as parent and name leaf -func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) { +func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) { // fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, replaceReservedChars(leaf)) var resp *http.Response response := createFolderResponse{} @@ -705,7 +707,7 @@ func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) { } // FindLeaf finds a directory of name leaf in the folder with ID pathID -func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) { +func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) { // fs.Debugf(nil, "FindLeaf(\"%s\", \"%s\")", pathID, leaf) if pathID == "0" && leaf == "" { @@ -751,13 +753,13 @@ func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err er // // This should return ErrDirNotFound if the directory isn't // found. 
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { +func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { // fs.Debugf(nil, "List(%v)", dir) - err = f.dirCache.FindRoot(false) + err = f.dirCache.FindRoot(ctx, false) if err != nil { return nil, err } - directoryID, err := f.dirCache.FindDir(dir, false) + directoryID, err := f.dirCache.FindDir(ctx, dir, false) if err != nil { return nil, err } @@ -791,7 +793,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { file.Name = restoreReservedChars(file.Name) // fs.Debugf(nil, "File: %s (%s)", file.Name, file.FileID) remote := path.Join(dir, file.Name) - o, err := f.newObjectWithInfo(remote, &file) + o, err := f.newObjectWithInfo(ctx, remote, &file) if err != nil { return nil, err } @@ -822,7 +824,7 @@ func (o *Object) Remote() string { } // Hash returns the Md5sum of an object returning a lowercase hex string -func (o *Object) Hash(t hash.Type) (string, error) { +func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { if t != hash.MD5 { return "", hash.ErrUnsupported } @@ -839,12 +841,12 @@ func (o *Object) Size() int64 { // // It attempts to read the objects mtime and if that isn't present the // LastModified returned in the http headers -func (o *Object) ModTime() time.Time { +func (o *Object) ModTime(ctx context.Context) time.Time { return o.modTime } // SetModTime sets the modification time of the local fs object -func (o *Object) SetModTime(modTime time.Time) error { +func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { // fs.Debugf(nil, "SetModTime(%v)", modTime.String()) opts := rest.Opts{ Method: "PUT", @@ -863,7 +865,7 @@ func (o *Object) SetModTime(modTime time.Time) error { } // Open an object for read -func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { +func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { // fs.Debugf(nil, 
"Open(\"%v\")", o.remote) fs.FixRangeOption(options, o.size) opts := rest.Opts{ @@ -884,7 +886,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { } // Remove an object -func (o *Object) Remove() error { +func (o *Object) Remove(ctx context.Context) error { // fs.Debugf(nil, "Remove(\"%s\")", o.id) return o.fs.pacer.Call(func() (bool, error) { opts := rest.Opts{ @@ -905,9 +907,9 @@ func (o *Object) Storable() bool { // Update the object with the contents of the io.Reader, modTime and size // // The new object may have been created if an error is returned -func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { +func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { size := src.Size() - modTime := src.ModTime() + modTime := src.ModTime(ctx) // fs.Debugf(nil, "Update(\"%s\", \"%s\")", o.id, o.remote) // Open file for upload @@ -1050,7 +1052,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio o.size = closeResponse.Size // Set the mod time now - err = o.SetModTime(modTime) + err = o.SetModTime(ctx, modTime) if err != nil { return err } @@ -1071,11 +1073,11 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio return err } - return o.readMetaData() + return o.readMetaData(ctx) } -func (o *Object) readMetaData() (err error) { - leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(o.remote, false) +func (o *Object) readMetaData(ctx context.Context) (err error) { + leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, o.remote, false) if err != nil { if err == fs.ErrorDirNotFound { return fs.ErrorObjectNotFound diff --git a/backend/pcloud/pcloud.go b/backend/pcloud/pcloud.go index 2caaa1444..1ee011b05 100644 --- a/backend/pcloud/pcloud.go +++ b/backend/pcloud/pcloud.go @@ -10,6 +10,7 @@ package pcloud import ( "bytes" + "context" "fmt" "io" "io/ioutil" @@ -192,9 +193,9 @@ func 
restoreReservedChars(x string) string { } // readMetaDataForPath reads the metadata from the path -func (f *Fs) readMetaDataForPath(path string) (info *api.Item, err error) { +func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) { // defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err) - leaf, directoryID, err := f.dirCache.FindRootAndPath(path, false) + leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, path, false) if err != nil { if err == fs.ErrorDirNotFound { return nil, fs.ErrorObjectNotFound @@ -237,6 +238,7 @@ func errorHandler(resp *http.Response) error { // NewFs constructs an Fs from the path, container:path func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { + ctx := context.Background() // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) @@ -264,7 +266,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { // Renew the token in the background f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error { - _, err := f.readMetaDataForPath("") + _, err := f.readMetaDataForPath(ctx, "") return err }) @@ -272,7 +274,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { f.dirCache = dircache.New(root, rootID, f) // Find the current root - err = f.dirCache.FindRoot(false) + err = f.dirCache.FindRoot(ctx, false) if err != nil { // Assume it is a file newRoot, remote := dircache.SplitPath(root) @@ -280,12 +282,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { tempF.dirCache = dircache.New(newRoot, rootID, &tempF) tempF.root = newRoot // Make new Fs which is the parent - err = tempF.dirCache.FindRoot(false) + err = tempF.dirCache.FindRoot(ctx, false) if err != nil { // No root so return old f return f, nil } - _, err := tempF.newObjectWithInfo(remote, nil) + _, err := tempF.newObjectWithInfo(ctx, remote, nil) if err != nil { if err == fs.ErrorObjectNotFound { // File doesn't exist so 
return old f @@ -307,7 +309,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { // Return an Object from a path // // If it can't be found it returns the error fs.ErrorObjectNotFound. -func (f *Fs) newObjectWithInfo(remote string, info *api.Item) (fs.Object, error) { +func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Item) (fs.Object, error) { o := &Object{ fs: f, remote: remote, @@ -317,7 +319,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.Item) (fs.Object, error) // Set info err = o.setMetaData(info) } else { - err = o.readMetaData() // reads info and meta, returning an error + err = o.readMetaData(ctx) // reads info and meta, returning an error } if err != nil { return nil, err @@ -327,12 +329,12 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.Item) (fs.Object, error) // NewObject finds the Object at remote. If it can't be found // it returns the error fs.ErrorObjectNotFound. -func (f *Fs) NewObject(remote string) (fs.Object, error) { - return f.newObjectWithInfo(remote, nil) +func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { + return f.newObjectWithInfo(ctx, remote, nil) } // FindLeaf finds a directory of name leaf in the folder with ID pathID -func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) { +func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) { // Find the leaf in pathID found, err = f.listAll(pathID, true, false, func(item *api.Item) bool { if item.Name == leaf { @@ -345,7 +347,7 @@ func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err er } // CreateDir makes a directory with pathID as parent and name leaf -func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) { +func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) { // fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf) var resp 
*http.Response var result api.ItemResult @@ -448,12 +450,12 @@ func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn list // // This should return ErrDirNotFound if the directory isn't // found. -func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { - err = f.dirCache.FindRoot(false) +func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { + err = f.dirCache.FindRoot(ctx, false) if err != nil { return nil, err } - directoryID, err := f.dirCache.FindDir(dir, false) + directoryID, err := f.dirCache.FindDir(ctx, dir, false) if err != nil { return nil, err } @@ -467,7 +469,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { // FIXME more info from dir? entries = append(entries, d) } else { - o, err := f.newObjectWithInfo(remote, info) + o, err := f.newObjectWithInfo(ctx, remote, info) if err != nil { iErr = err return true @@ -491,9 +493,9 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { // Returns the object, leaf, directoryID and error // // Used to create new objects -func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) { +func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) { // Create the directory for the object if it doesn't exist - leaf, directoryID, err = f.dirCache.FindRootAndPath(remote, true) + leaf, directoryID, err = f.dirCache.FindRootAndPath(ctx, remote, true) if err != nil { return } @@ -510,43 +512,43 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje // Copy the reader in to the new object which is returned // // The new object may have been created if an error is returned -func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, 
options ...fs.OpenOption) (fs.Object, error) { remote := src.Remote() size := src.Size() - modTime := src.ModTime() + modTime := src.ModTime(ctx) - o, _, _, err := f.createObject(remote, modTime, size) + o, _, _, err := f.createObject(ctx, remote, modTime, size) if err != nil { return nil, err } - return o, o.Update(in, src, options...) + return o, o.Update(ctx, in, src, options...) } // Mkdir creates the container if it doesn't exist -func (f *Fs) Mkdir(dir string) error { - err := f.dirCache.FindRoot(true) +func (f *Fs) Mkdir(ctx context.Context, dir string) error { + err := f.dirCache.FindRoot(ctx, true) if err != nil { return err } if dir != "" { - _, err = f.dirCache.FindDir(dir, true) + _, err = f.dirCache.FindDir(ctx, dir, true) } return err } // purgeCheck removes the root directory, if check is set then it // refuses to do so if it has anything in -func (f *Fs) purgeCheck(dir string, check bool) error { +func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error { root := path.Join(f.root, dir) if root == "" { return errors.New("can't purge root directory") } dc := f.dirCache - err := dc.FindRoot(false) + err := dc.FindRoot(ctx, false) if err != nil { return err } - rootID, err := dc.FindDir(dir, false) + rootID, err := dc.FindDir(ctx, dir, false) if err != nil { return err } @@ -580,8 +582,8 @@ func (f *Fs) purgeCheck(dir string, check bool) error { // Rmdir deletes the root folder // // Returns an error if it isn't empty -func (f *Fs) Rmdir(dir string) error { - return f.purgeCheck(dir, true) +func (f *Fs) Rmdir(ctx context.Context, dir string) error { + return f.purgeCheck(ctx, dir, true) } // Precision return the precision of this Fs @@ -598,19 +600,19 @@ func (f *Fs) Precision() time.Duration { // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy -func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { +func (f *Fs) Copy(ctx context.Context, src fs.Object, remote 
string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't copy - not same remote type") return nil, fs.ErrorCantCopy } - err := srcObj.readMetaData() + err := srcObj.readMetaData(ctx) if err != nil { return nil, err } // Create temporary object - dstObj, leaf, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size) + dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size) if err != nil { return nil, err } @@ -647,13 +649,13 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { // Optional interface: Only implement this if you have a way of // deleting all the files quicker than just running Remove() on the // result of List() -func (f *Fs) Purge() error { - return f.purgeCheck("", false) +func (f *Fs) Purge(ctx context.Context) error { + return f.purgeCheck(ctx, "", false) } // CleanUp empties the trash -func (f *Fs) CleanUp() error { - err := f.dirCache.FindRoot(false) +func (f *Fs) CleanUp(ctx context.Context) error { + err := f.dirCache.FindRoot(ctx, false) if err != nil { return err } @@ -681,7 +683,7 @@ func (f *Fs) CleanUp() error { // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantMove -func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { +func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't move - not same remote type") @@ -689,7 +691,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { } // Create temporary object - dstObj, leaf, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size) + dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size) if err != nil { return nil, err } @@ -729,7 +731,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { // If it isn't possible then return fs.ErrorCantDirMove // // 
If destination exists then return fs.ErrorDirExists -func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { +func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { srcFs, ok := src.(*Fs) if !ok { fs.Debugf(srcFs, "Can't move directory - not same remote type") @@ -745,14 +747,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { } // find the root src directory - err := srcFs.dirCache.FindRoot(false) + err := srcFs.dirCache.FindRoot(ctx, false) if err != nil { return err } // find the root dst directory if dstRemote != "" { - err = f.dirCache.FindRoot(true) + err = f.dirCache.FindRoot(ctx, true) if err != nil { return err } @@ -768,14 +770,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { if dstRemote == "" { findPath = f.root } - leaf, directoryID, err = f.dirCache.FindPath(findPath, true) + leaf, directoryID, err = f.dirCache.FindPath(ctx, findPath, true) if err != nil { return err } // Check destination does not exist if dstRemote != "" { - _, err = f.dirCache.FindDir(dstRemote, false) + _, err = f.dirCache.FindDir(ctx, dstRemote, false) if err == fs.ErrorDirNotFound { // OK } else if err != nil { @@ -786,7 +788,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { } // Find ID of src - srcID, err := srcFs.dirCache.FindDir(srcRemote, false) + srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false) if err != nil { return err } @@ -822,7 +824,7 @@ func (f *Fs) DirCacheFlush() { } // About gets quota information -func (f *Fs) About() (usage *fs.Usage, err error) { +func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) { opts := rest.Opts{ Method: "POST", Path: "/userinfo", @@ -871,7 +873,7 @@ func (o *Object) Remote() string { } // getHashes fetches the hashes into the object -func (o *Object) getHashes() (err error) { +func (o *Object) getHashes(ctx context.Context) (err error) { var resp *http.Response var result 
api.ChecksumFileResult opts := rest.Opts{ @@ -893,12 +895,12 @@ func (o *Object) getHashes() (err error) { } // Hash returns the SHA-1 of an object returning a lowercase hex string -func (o *Object) Hash(t hash.Type) (string, error) { +func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { if t != hash.MD5 && t != hash.SHA1 { return "", hash.ErrUnsupported } if o.md5 == "" && o.sha1 == "" { - err := o.getHashes() + err := o.getHashes(ctx) if err != nil { return "", errors.Wrap(err, "failed to get hash") } @@ -911,7 +913,7 @@ func (o *Object) Hash(t hash.Type) (string, error) { // Size returns the size of an object in bytes func (o *Object) Size() int64 { - err := o.readMetaData() + err := o.readMetaData(context.TODO()) if err != nil { fs.Logf(o, "Failed to read metadata: %v", err) return 0 @@ -940,11 +942,11 @@ func (o *Object) setHashes(hashes *api.Hashes) { // readMetaData gets the metadata if it hasn't already been fetched // // it also sets the info -func (o *Object) readMetaData() (err error) { +func (o *Object) readMetaData(ctx context.Context) (err error) { if o.hasMetaData { return nil } - info, err := o.fs.readMetaDataForPath(o.remote) + info, err := o.fs.readMetaDataForPath(ctx, o.remote) if err != nil { //if apiErr, ok := err.(*api.Error); ok { // FIXME @@ -962,8 +964,8 @@ func (o *Object) readMetaData() (err error) { // // It attempts to read the objects mtime and if that isn't present the // LastModified returned in the http headers -func (o *Object) ModTime() time.Time { - err := o.readMetaData() +func (o *Object) ModTime(ctx context.Context) time.Time { + err := o.readMetaData(ctx) if err != nil { fs.Logf(o, "Failed to read metadata: %v", err) return time.Now() @@ -972,7 +974,7 @@ func (o *Object) ModTime() time.Time { } // SetModTime sets the modification time of the local fs object -func (o *Object) SetModTime(modTime time.Time) error { +func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { // Pcloud doesn't 
have a way of doing this so returning this // error will cause the file to be re-uploaded to set the time. return fs.ErrorCantSetModTime @@ -1015,7 +1017,7 @@ func (o *Object) downloadURL() (URL string, err error) { } // Open an object for read -func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { +func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { url, err := o.downloadURL() if err != nil { return nil, err @@ -1041,16 +1043,16 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { // If existing is set then it updates the object rather than creating a new one // // The new object may have been created if an error is returned -func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { +func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { o.fs.tokenRenewer.Start() defer o.fs.tokenRenewer.Stop() size := src.Size() // NB can upload without size - modTime := src.ModTime() + modTime := src.ModTime(ctx) remote := o.Remote() // Create the directory for the object if it doesn't exist - leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(remote, true) + leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, remote, true) if err != nil { return err } @@ -1073,7 +1075,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio Method: "PUT", Path: "/uploadfile", Body: in, - ContentType: fs.MimeType(o), + ContentType: fs.MimeType(ctx, o), ContentLength: &size, Parameters: url.Values{}, TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding @@ -1114,9 +1116,9 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio if err != nil { // sometimes pcloud leaves a half complete file on // error, so delete it if it exists - delObj, delErr := o.fs.NewObject(o.remote) + delObj, delErr := 
o.fs.NewObject(ctx, o.remote) if delErr == nil && delObj != nil { - _ = delObj.Remove() + _ = delObj.Remove(ctx) } return err } @@ -1128,7 +1130,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio } // Remove an object -func (o *Object) Remove() error { +func (o *Object) Remove(ctx context.Context) error { opts := rest.Opts{ Method: "POST", Path: "/deletefile", diff --git a/backend/qingstor/qingstor.go b/backend/qingstor/qingstor.go index ae684a5af..fcdbee811 100644 --- a/backend/qingstor/qingstor.go +++ b/backend/qingstor/qingstor.go @@ -6,6 +6,7 @@ package qingstor import ( + "context" "fmt" "io" "net/http" @@ -407,12 +408,12 @@ func (f *Fs) Features() *fs.Features { } // Put created a new object -func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { fsObj := &Object{ fs: f, remote: src.Remote(), } - return fsObj, fsObj.Update(in, src, options...) + return fsObj, fsObj.Update(ctx, in, src, options...) } // Copy src to this remote using server side copy operations. @@ -424,8 +425,8 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs. // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy -func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { - err := f.Mkdir("") +func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { + err := f.Mkdir(ctx, "") if err != nil { return nil, err } @@ -452,12 +453,12 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { fs.Debugf(f, "Copy Failed, API Error: %v", err) return nil, err } - return f.NewObject(remote) + return f.NewObject(ctx, remote) } // NewObject finds the Object at remote. If it can't be found // it returns the error fs.ErrorObjectNotFound. 
-func (f *Fs) NewObject(remote string) (fs.Object, error) { +func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { return f.newObjectWithInfo(remote, nil) } @@ -510,7 +511,7 @@ type listFn func(remote string, object *qs.KeyType, isDirectory bool) error // dir is the starting directory, "" for root // // Set recurse to read sub directories -func (f *Fs) list(dir string, recurse bool, fn listFn) error { +func (f *Fs) list(ctx context.Context, dir string, recurse bool, fn listFn) error { prefix := f.root if dir != "" { prefix += dir + "/" @@ -620,9 +621,9 @@ func (f *Fs) markBucketOK() { } // listDir lists files and directories to out -func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) { +func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) { // List the objects and directories - err = f.list(dir, false, func(remote string, object *qs.KeyType, isDirectory bool) error { + err = f.list(ctx, dir, false, func(remote string, object *qs.KeyType, isDirectory bool) error { entry, err := f.itemToDirEntry(remote, object, isDirectory) if err != nil { return err @@ -670,11 +671,11 @@ func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) { // // This should return ErrDirNotFound if the directory isn't // found. -func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { +func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { if f.bucket == "" { return f.listBuckets(dir) } - return f.listDir(dir) + return f.listDir(ctx, dir) } // ListR lists the objects and directories of the Fs starting @@ -693,12 +694,12 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { // // Don't implement this unless you have a more efficient way // of listing recursively that doing a directory traversal. 
-func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) { +func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { if f.bucket == "" { return fs.ErrorListBucketRequired } list := walk.NewListRHelper(callback) - err = f.list(dir, true, func(remote string, object *qs.KeyType, isDirectory bool) error { + err = f.list(ctx, dir, true, func(remote string, object *qs.KeyType, isDirectory bool) error { entry, err := f.itemToDirEntry(remote, object, isDirectory) if err != nil { return err @@ -734,7 +735,7 @@ func (f *Fs) dirExists() (bool, error) { } // Mkdir creates the bucket if it doesn't exist -func (f *Fs) Mkdir(dir string) error { +func (f *Fs) Mkdir(ctx context.Context, dir string) error { f.bucketOKMu.Lock() defer f.bucketOKMu.Unlock() if f.bucketOK { @@ -810,7 +811,7 @@ func (f *Fs) dirIsEmpty() (bool, error) { } // Rmdir delete a bucket -func (f *Fs) Rmdir(dir string) error { +func (f *Fs) Rmdir(ctx context.Context, dir string) error { f.bucketOKMu.Lock() defer f.bucketOKMu.Unlock() if f.root != "" || dir != "" { @@ -913,7 +914,7 @@ func (o *Object) readMetaData() (err error) { // ModTime returns the modification date of the file // It should return a best guess if one isn't available -func (o *Object) ModTime() time.Time { +func (o *Object) ModTime(ctx context.Context) time.Time { err := o.readMetaData() if err != nil { fs.Logf(o, "Failed to read metadata, %v", err) @@ -924,13 +925,13 @@ func (o *Object) ModTime() time.Time { } // SetModTime sets the modification time of the local fs object -func (o *Object) SetModTime(modTime time.Time) error { +func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { err := o.readMetaData() if err != nil { return err } o.lastModified = modTime - mimeType := fs.MimeType(o) + mimeType := fs.MimeType(ctx, o) if o.size >= maxSizeForCopy { fs.Debugf(o, "SetModTime is unsupported for objects bigger than %v bytes", fs.SizeSuffix(maxSizeForCopy)) @@ -955,7 +956,7 @@ 
func (o *Object) SetModTime(modTime time.Time) error { } // Open opens the file for read. Call Close() on the returned io.ReadCloser -func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) { +func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) { bucketInit, err := o.fs.svc.Bucket(o.fs.bucket, o.fs.zone) if err != nil { return nil, err @@ -982,16 +983,16 @@ func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) { } // Update in to the object -func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { +func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { // The maximum size of upload object is multipartUploadSize * MaxMultipleParts - err := o.fs.Mkdir("") + err := o.fs.Mkdir(ctx, "") if err != nil { return err } key := o.fs.root + o.remote // Guess the content type - mimeType := fs.MimeType(src) + mimeType := fs.MimeType(ctx, src) req := uploadInput{ body: in, @@ -1021,7 +1022,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio } // Remove this object -func (o *Object) Remove() error { +func (o *Object) Remove(ctx context.Context) error { bucketInit, err := o.fs.svc.Bucket(o.fs.bucket, o.fs.zone) if err != nil { return err @@ -1041,7 +1042,7 @@ var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`) // Hash returns the selected checksum of the file // If no checksum is available it returns "" -func (o *Object) Hash(t hash.Type) (string, error) { +func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { if t != hash.MD5 { return "", hash.ErrUnsupported } @@ -1078,7 +1079,7 @@ func (o *Object) Size() int64 { } // MimeType of an Object if known, "" otherwise -func (o *Object) MimeType() string { +func (o *Object) MimeType(ctx context.Context) string { err := o.readMetaData() if err != nil { fs.Logf(o, "Failed to read metadata: %v", err) diff --git 
a/backend/s3/s3.go b/backend/s3/s3.go index 65525ec13..573c33cc2 100644 --- a/backend/s3/s3.go +++ b/backend/s3/s3.go @@ -14,6 +14,7 @@ What happens if you CTRL-C a multipart upload */ import ( + "context" "encoding/base64" "encoding/hex" "fmt" @@ -1109,7 +1110,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { // Return an Object from a path // //If it can't be found it returns the error ErrorObjectNotFound. -func (f *Fs) newObjectWithInfo(remote string, info *s3.Object) (fs.Object, error) { +func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *s3.Object) (fs.Object, error) { o := &Object{ fs: f, remote: remote, @@ -1125,7 +1126,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *s3.Object) (fs.Object, error o.etag = aws.StringValue(info.ETag) o.bytes = aws.Int64Value(info.Size) } else { - err := o.readMetaData() // reads info and meta, returning an error + err := o.readMetaData(ctx) // reads info and meta, returning an error if err != nil { return nil, err } @@ -1135,8 +1136,8 @@ func (f *Fs) newObjectWithInfo(remote string, info *s3.Object) (fs.Object, error // NewObject finds the Object at remote. If it can't be found // it returns the error fs.ErrorObjectNotFound. 
-func (f *Fs) NewObject(remote string) (fs.Object, error) { - return f.newObjectWithInfo(remote, nil) +func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { + return f.newObjectWithInfo(ctx, remote, nil) } // Gets the bucket location @@ -1192,7 +1193,7 @@ type listFn func(remote string, object *s3.Object, isDirectory bool) error // dir is the starting directory, "" for root // // Set recurse to read sub directories -func (f *Fs) list(dir string, recurse bool, fn listFn) error { +func (f *Fs) list(ctx context.Context, dir string, recurse bool, fn listFn) error { root := f.root if dir != "" { root += dir + "/" @@ -1215,7 +1216,7 @@ func (f *Fs) list(dir string, recurse bool, fn listFn) error { var resp *s3.ListObjectsOutput var err error err = f.pacer.Call(func() (bool, error) { - resp, err = f.c.ListObjects(&req) + resp, err = f.c.ListObjectsWithContext(ctx, &req) return f.shouldRetry(err) }) if err != nil { @@ -1289,7 +1290,7 @@ func (f *Fs) list(dir string, recurse bool, fn listFn) error { } // Convert a list item into a DirEntry -func (f *Fs) itemToDirEntry(remote string, object *s3.Object, isDirectory bool) (fs.DirEntry, error) { +func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *s3.Object, isDirectory bool) (fs.DirEntry, error) { if isDirectory { size := int64(0) if object.Size != nil { @@ -1298,7 +1299,7 @@ func (f *Fs) itemToDirEntry(remote string, object *s3.Object, isDirectory bool) d := fs.NewDir(remote, time.Time{}).SetSize(size) return d, nil } - o, err := f.newObjectWithInfo(remote, object) + o, err := f.newObjectWithInfo(ctx, remote, object) if err != nil { return nil, err } @@ -1316,10 +1317,10 @@ func (f *Fs) markBucketOK() { } // listDir lists files and directories to out -func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) { +func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) { // List the objects and directories - err = f.list(dir, false, 
func(remote string, object *s3.Object, isDirectory bool) error { - entry, err := f.itemToDirEntry(remote, object, isDirectory) + err = f.list(ctx, dir, false, func(remote string, object *s3.Object, isDirectory bool) error { + entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory) if err != nil { return err } @@ -1337,14 +1338,14 @@ func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) { } // listBuckets lists the buckets to out -func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) { +func (f *Fs) listBuckets(ctx context.Context, dir string) (entries fs.DirEntries, err error) { if dir != "" { return nil, fs.ErrorListBucketRequired } req := s3.ListBucketsInput{} var resp *s3.ListBucketsOutput err = f.pacer.Call(func() (bool, error) { - resp, err = f.c.ListBuckets(&req) + resp, err = f.c.ListBucketsWithContext(ctx, &req) return f.shouldRetry(err) }) if err != nil { @@ -1366,11 +1367,11 @@ func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) { // // This should return ErrDirNotFound if the directory isn't // found. -func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { +func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { if f.bucket == "" { - return f.listBuckets(dir) + return f.listBuckets(ctx, dir) } - return f.listDir(dir) + return f.listDir(ctx, dir) } // ListR lists the objects and directories of the Fs starting @@ -1389,13 +1390,13 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { // // Don't implement this unless you have a more efficient way // of listing recursively that doing a directory traversal. 
-func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) { +func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { if f.bucket == "" { return fs.ErrorListBucketRequired } list := walk.NewListRHelper(callback) - err = f.list(dir, true, func(remote string, object *s3.Object, isDirectory bool) error { - entry, err := f.itemToDirEntry(remote, object, isDirectory) + err = f.list(ctx, dir, true, func(remote string, object *s3.Object, isDirectory bool) error { + entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory) if err != nil { return err } @@ -1410,29 +1411,29 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) { } // Put the Object into the bucket -func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { // Temporary Object under construction fs := &Object{ fs: f, remote: src.Remote(), } - return fs, fs.Update(in, src, options...) + return fs, fs.Update(ctx, in, src, options...) } // PutStream uploads to the remote path with the modTime given of indeterminate size -func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { - return f.Put(in, src, options...) +func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + return f.Put(ctx, in, src, options...) 
} // Check if the bucket exists // // NB this can return incorrect results if called immediately after bucket deletion -func (f *Fs) dirExists() (bool, error) { +func (f *Fs) dirExists(ctx context.Context) (bool, error) { req := s3.HeadBucketInput{ Bucket: &f.bucket, } err := f.pacer.Call(func() (bool, error) { - _, err := f.c.HeadBucket(&req) + _, err := f.c.HeadBucketWithContext(ctx, &req) return f.shouldRetry(err) }) if err == nil { @@ -1447,14 +1448,14 @@ func (f *Fs) dirExists() (bool, error) { } // Mkdir creates the bucket if it doesn't exist -func (f *Fs) Mkdir(dir string) error { +func (f *Fs) Mkdir(ctx context.Context, dir string) error { f.bucketOKMu.Lock() defer f.bucketOKMu.Unlock() if f.bucketOK { return nil } if !f.bucketDeleted { - exists, err := f.dirExists() + exists, err := f.dirExists(ctx) if err == nil { f.bucketOK = exists } @@ -1472,7 +1473,7 @@ func (f *Fs) Mkdir(dir string) error { } } err := f.pacer.Call(func() (bool, error) { - _, err := f.c.CreateBucket(&req) + _, err := f.c.CreateBucketWithContext(ctx, &req) return f.shouldRetry(err) }) if err, ok := err.(awserr.Error); ok { @@ -1491,7 +1492,7 @@ func (f *Fs) Mkdir(dir string) error { // Rmdir deletes the bucket if the fs is at the root // // Returns an error if it isn't empty -func (f *Fs) Rmdir(dir string) error { +func (f *Fs) Rmdir(ctx context.Context, dir string) error { f.bucketOKMu.Lock() defer f.bucketOKMu.Unlock() if f.root != "" || dir != "" { @@ -1501,7 +1502,7 @@ func (f *Fs) Rmdir(dir string) error { Bucket: &f.bucket, } err := f.pacer.Call(func() (bool, error) { - _, err := f.c.DeleteBucket(&req) + _, err := f.c.DeleteBucketWithContext(ctx, &req) return f.shouldRetry(err) }) if err == nil { @@ -1532,8 +1533,8 @@ func pathEscape(s string) string { // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy -func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { - err := f.Mkdir("") +func (f *Fs) Copy(ctx 
context.Context, src fs.Object, remote string) (fs.Object, error) { + err := f.Mkdir(ctx, "") if err != nil { return nil, err } @@ -1562,13 +1563,13 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { req.StorageClass = &f.opt.StorageClass } err = f.pacer.Call(func() (bool, error) { - _, err = f.c.CopyObject(&req) + _, err = f.c.CopyObjectWithContext(ctx, &req) return f.shouldRetry(err) }) if err != nil { return nil, err } - return f.NewObject(remote) + return f.NewObject(ctx, remote) } // Hashes returns the supported hash sets. @@ -1599,14 +1600,14 @@ func (o *Object) Remote() string { var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`) // Hash returns the Md5sum of an object returning a lowercase hex string -func (o *Object) Hash(t hash.Type) (string, error) { +func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { if t != hash.MD5 { return "", hash.ErrUnsupported } hash := strings.Trim(strings.ToLower(o.etag), `"`) // Check the etag is a valid md5sum if !matchMd5.MatchString(hash) { - err := o.readMetaData() + err := o.readMetaData(ctx) if err != nil { return "", err } @@ -1632,7 +1633,7 @@ func (o *Object) Size() int64 { // readMetaData gets the metadata if it hasn't already been fetched // // it also sets the info -func (o *Object) readMetaData() (err error) { +func (o *Object) readMetaData(ctx context.Context) (err error) { if o.meta != nil { return nil } @@ -1644,7 +1645,7 @@ func (o *Object) readMetaData() (err error) { var resp *s3.HeadObjectOutput err = o.fs.pacer.Call(func() (bool, error) { var err error - resp, err = o.fs.c.HeadObject(&req) + resp, err = o.fs.c.HeadObjectWithContext(ctx, &req) return o.fs.shouldRetry(err) }) if err != nil { @@ -1678,11 +1679,11 @@ func (o *Object) readMetaData() (err error) { // // It attempts to read the objects mtime and if that isn't present the // LastModified returned in the http headers -func (o *Object) ModTime() time.Time { +func (o *Object) ModTime(ctx context.Context) 
time.Time { if fs.Config.UseServerModTime { return o.lastModified } - err := o.readMetaData() + err := o.readMetaData(ctx) if err != nil { fs.Logf(o, "Failed to read metadata: %v", err) return time.Now() @@ -1702,8 +1703,8 @@ func (o *Object) ModTime() time.Time { } // SetModTime sets the modification time of the local fs object -func (o *Object) SetModTime(modTime time.Time) error { - err := o.readMetaData() +func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { + err := o.readMetaData(ctx) if err != nil { return err } @@ -1715,7 +1716,7 @@ func (o *Object) SetModTime(modTime time.Time) error { } // Guess the content type - mimeType := fs.MimeType(o) + mimeType := fs.MimeType(ctx, o) // Copy the object to itself to update the metadata key := o.fs.root + o.remote @@ -1743,7 +1744,7 @@ func (o *Object) SetModTime(modTime time.Time) error { req.StorageClass = &o.fs.opt.StorageClass } err = o.fs.pacer.Call(func() (bool, error) { - _, err := o.fs.c.CopyObject(&req) + _, err := o.fs.c.CopyObjectWithContext(ctx, &req) return o.fs.shouldRetry(err) }) return err @@ -1755,7 +1756,7 @@ func (o *Object) Storable() bool { } // Open an object for read -func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { +func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { key := o.fs.root + o.remote req := s3.GetObjectInput{ Bucket: &o.fs.bucket, @@ -1775,7 +1776,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { var resp *s3.GetObjectOutput err = o.fs.pacer.Call(func() (bool, error) { var err error - resp, err = o.fs.c.GetObject(&req) + resp, err = o.fs.c.GetObjectWithContext(ctx, &req) return o.fs.shouldRetry(err) }) if err, ok := err.(awserr.RequestFailure); ok { @@ -1790,12 +1791,12 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { } // Update the Object from in with modTime and size -func (o *Object) Update(in io.Reader, src 
fs.ObjectInfo, options ...fs.OpenOption) error { - err := o.fs.Mkdir("") +func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { + err := o.fs.Mkdir(ctx, "") if err != nil { return err } - modTime := src.ModTime() + modTime := src.ModTime(ctx) size := src.Size() multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff) @@ -1830,7 +1831,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio // disable checksum isn't present. var md5sum string if !multipart || !o.fs.opt.DisableChecksum { - hash, err := src.Hash(hash.MD5) + hash, err := src.Hash(ctx, hash.MD5) if err == nil && matchMd5.MatchString(hash) { hashBytes, err := hex.DecodeString(hash) if err == nil { @@ -1843,7 +1844,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio } // Guess the content type - mimeType := fs.MimeType(src) + mimeType := fs.MimeType(ctx, src) key := o.fs.root + o.remote if multipart { @@ -1866,7 +1867,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio req.StorageClass = &o.fs.opt.StorageClass } err = o.fs.pacer.CallNoRetry(func() (bool, error) { - _, err = uploader.Upload(&req) + _, err = uploader.UploadWithContext(ctx, &req) return o.fs.shouldRetry(err) }) if err != nil { @@ -1915,6 +1916,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio if err != nil { return errors.Wrap(err, "s3 upload: new request") } + httpReq = httpReq.WithContext(ctx) // set the headers we signed and the length httpReq.Header = headers @@ -1942,27 +1944,27 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio // Read the metadata from the newly created object o.meta = nil // wipe old metadata - err = o.readMetaData() + err = o.readMetaData(ctx) return err } // Remove an object -func (o *Object) Remove() error { +func (o *Object) Remove(ctx context.Context) error { key := o.fs.root + o.remote 
req := s3.DeleteObjectInput{ Bucket: &o.fs.bucket, Key: &key, } err := o.fs.pacer.Call(func() (bool, error) { - _, err := o.fs.c.DeleteObject(&req) + _, err := o.fs.c.DeleteObjectWithContext(ctx, &req) return o.fs.shouldRetry(err) }) return err } // MimeType of an Object if known, "" otherwise -func (o *Object) MimeType() string { - err := o.readMetaData() +func (o *Object) MimeType(ctx context.Context) string { + err := o.readMetaData(ctx) if err != nil { fs.Logf(o, "Failed to read metadata: %v", err) return "" diff --git a/backend/sftp/sftp.go b/backend/sftp/sftp.go index efd5412bc..aaa70413b 100644 --- a/backend/sftp/sftp.go +++ b/backend/sftp/sftp.go @@ -321,6 +321,7 @@ func (f *Fs) putSftpConnection(pc **conn, err error) { // NewFs creates a new Fs object from the name and root. It connects to // the host specified in the config file. func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { + ctx := context.Background() // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) @@ -419,12 +420,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { sshConfig.Auth = append(sshConfig.Auth, ssh.Password(clearpass)) } - return NewFsWithConnection(name, root, opt, sshConfig) + return NewFsWithConnection(ctx, name, root, opt, sshConfig) } // NewFsWithConnection creates a new Fs object from the name and root and a ssh.ClientConfig. It connects to // the host specified in the ssh.ClientConfig -func NewFsWithConnection(name string, root string, opt *Options, sshConfig *ssh.ClientConfig) (fs.Fs, error) { +func NewFsWithConnection(ctx context.Context, name string, root string, opt *Options, sshConfig *ssh.ClientConfig) (fs.Fs, error) { f := &Fs{ name: name, root: root, @@ -450,7 +451,7 @@ func NewFsWithConnection(name string, root string, opt *Options, sshConfig *ssh. if f.root == "." 
{ f.root = "" } - _, err := f.NewObject(remote) + _, err := f.NewObject(ctx, remote) if err != nil { if err == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile { // File doesn't exist so return old f @@ -491,7 +492,7 @@ func (f *Fs) Precision() time.Duration { } // NewObject creates a new remote sftp file object -func (f *Fs) NewObject(remote string) (fs.Object, error) { +func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { o := &Object{ fs: f, remote: remote, @@ -536,7 +537,7 @@ func (f *Fs) dirExists(dir string) (bool, error) { // // This should return ErrDirNotFound if the directory isn't // found. -func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { +func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { root := path.Join(f.root, dir) ok, err := f.dirExists(root) if err != nil { @@ -587,8 +588,8 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { return entries, nil } -// Put data from <in> into a new remote sftp file object described by <src.Remote()> and <src.ModTime()> -func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { +// Put data from <in> into a new remote sftp file object described by <src.Remote()> and <src.ModTime(ctx)> +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { err := f.mkParentDir(src.Remote()) if err != nil { return nil, errors.Wrap(err, "Put mkParentDir failed") } @@ -598,7 +599,7 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs. fs: f, remote: src.Remote(), } - err = o.Update(in, src, options...) + err = o.Update(ctx, in, src, options...) if err != nil { return nil, err } @@ -606,8 +607,8 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.
} // PutStream uploads to the remote path with the modTime given of indeterminate size -func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { - return f.Put(in, src, options...) +func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + return f.Put(ctx, in, src, options...) } // mkParentDir makes the parent of remote if necessary and any @@ -649,16 +650,16 @@ func (f *Fs) mkdir(dirPath string) error { } // Mkdir makes the root directory of the Fs object -func (f *Fs) Mkdir(dir string) error { +func (f *Fs) Mkdir(ctx context.Context, dir string) error { root := path.Join(f.root, dir) return f.mkdir(root) } // Rmdir removes the root directory of the Fs object -func (f *Fs) Rmdir(dir string) error { +func (f *Fs) Rmdir(ctx context.Context, dir string) error { // Check to see if directory is empty as some servers will // delete recursively with RemoveDirectory - entries, err := f.List(dir) + entries, err := f.List(ctx, dir) if err != nil { return errors.Wrap(err, "Rmdir") } @@ -677,7 +678,7 @@ func (f *Fs) Rmdir(dir string) error { } // Move renames a remote sftp file object -func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { +func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't move - not same remote type") @@ -699,7 +700,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { if err != nil { return nil, errors.Wrap(err, "Move Rename failed") } - dstObj, err := f.NewObject(remote) + dstObj, err := f.NewObject(ctx, remote) if err != nil { return nil, errors.Wrap(err, "Move NewObject failed") } @@ -714,7 +715,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { // If it isn't possible then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists -func (f *Fs) DirMove(src 
fs.Fs, srcRemote, dstRemote string) error { +func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { srcFs, ok := src.(*Fs) if !ok { fs.Debugf(srcFs, "Can't move directory - not same remote type") @@ -868,7 +869,7 @@ func (o *Object) Remote() string { // Hash returns the selected checksum of the file // If no checksum is available it returns "" -func (o *Object) Hash(r hash.Type) (string, error) { +func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) { var hashCmd string if r == hash.MD5 { if o.md5sum != nil { @@ -973,7 +974,7 @@ func (o *Object) Size() int64 { } // ModTime returns the modification time of the remote sftp file -func (o *Object) ModTime() time.Time { +func (o *Object) ModTime(ctx context.Context) time.Time { return o.modTime } @@ -1020,7 +1021,7 @@ func (o *Object) stat() error { // SetModTime sets the modification and access time to the specified time // // it also updates the info field -func (o *Object) SetModTime(modTime time.Time) error { +func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { c, err := o.fs.getSftpConnection() if err != nil { return errors.Wrap(err, "SetModTime") @@ -1091,7 +1092,7 @@ func (file *objectReader) Close() (err error) { } // Open a remote sftp file object for reading. 
Seek is supported -func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { +func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { var offset, limit int64 = 0, -1 for _, option := range options { switch x := option.(type) { @@ -1125,7 +1126,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { } // Update a remote sftp file using the data <in> and ModTime from <src> -func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { +func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { // Clear the hash cache since we are about to update the object o.md5sum = nil o.sha1sum = nil @@ -1163,7 +1164,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio remove() return errors.Wrap(err, "Update Close failed") } - err = o.SetModTime(src.ModTime()) + err = o.SetModTime(ctx, src.ModTime(ctx)) if err != nil { return errors.Wrap(err, "Update SetModTime failed") } @@ -1171,7 +1172,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio } // Remove a remote sftp file object -func (o *Object) Remove() error { +func (o *Object) Remove(ctx context.Context) error { c, err := o.fs.getSftpConnection() if err != nil { return errors.Wrap(err, "Remove") diff --git a/backend/swift/swift.go b/backend/swift/swift.go index d9de011a3..11320ac2d 100644 --- a/backend/swift/swift.go +++ b/backend/swift/swift.go @@ -4,6 +4,7 @@ package swift import ( "bufio" "bytes" + "context" "fmt" "io" "path" @@ -508,7 +509,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *swift.Object) (fs.Object, er // NewObject finds the Object at remote. If it can't be found it // returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(remote string) (fs.Object, error) { +func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { return f.newObjectWithInfo(remote, nil) } @@ -652,7 +653,7 @@ func (f *Fs) listContainers(dir string) (entries fs.DirEntries, err error) { // // This should return ErrDirNotFound if the directory isn't // found. -func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { +func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { if f.container == "" { return f.listContainers(dir) } @@ -675,7 +676,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { // // Don't implement this unless you have a more efficient way // of listing recursively that doing a directory traversal. -func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) { +func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { if f.container == "" { return errors.New("container needed for recursive list") } @@ -692,7 +693,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) { } // About gets quota information -func (f *Fs) About() (*fs.Usage, error) { +func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { var containers []swift.Container var err error err = f.pacer.Call(func() (bool, error) { @@ -719,23 +720,23 @@ func (f *Fs) About() (*fs.Usage, error) { // Copy the reader in to the new object which is returned // // The new object may have been created if an error is returned -func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { // Temporary Object under construction fs := &Object{ fs: f, remote: src.Remote(), headers: swift.Headers{}, // Empty object headers to stop readMetaData being called } - return fs, fs.Update(in, src, options...) 
+ return fs, fs.Update(ctx, in, src, options...) } // PutStream uploads to the remote path with the modTime given of indeterminate size -func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { - return f.Put(in, src, options...) +func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + return f.Put(ctx, in, src, options...) } // Mkdir creates the container if it doesn't exist -func (f *Fs) Mkdir(dir string) error { +func (f *Fs) Mkdir(ctx context.Context, dir string) error { f.containerOKMu.Lock() defer f.containerOKMu.Unlock() if f.containerOK { @@ -773,7 +774,7 @@ func (f *Fs) Mkdir(dir string) error { // Rmdir deletes the container if the fs is at the root // // Returns an error if it isn't empty -func (f *Fs) Rmdir(dir string) error { +func (f *Fs) Rmdir(ctx context.Context, dir string) error { f.containerOKMu.Lock() defer f.containerOKMu.Unlock() if f.root != "" || dir != "" { @@ -798,12 +799,12 @@ func (f *Fs) Precision() time.Duration { // Purge deletes all the files and directories // // Implemented here so we can make sure we delete directory markers -func (f *Fs) Purge() error { +func (f *Fs) Purge(ctx context.Context) error { // Delete all the files including the directory markers toBeDeleted := make(chan fs.Object, fs.Config.Transfers) delErr := make(chan error, 1) go func() { - delErr <- operations.DeleteFiles(toBeDeleted) + delErr <- operations.DeleteFiles(ctx, toBeDeleted) }() err := f.list("", true, func(entry fs.DirEntry) error { if o, ok := entry.(*Object); ok { @@ -819,7 +820,7 @@ func (f *Fs) Purge() error { if err != nil { return err } - return f.Rmdir("") + return f.Rmdir(ctx, "") } // Copy src to this remote using server side copy operations. 
@@ -831,8 +832,8 @@ func (f *Fs) Purge() error { // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy -func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { - err := f.Mkdir("") +func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { + err := f.Mkdir(ctx, "") if err != nil { return nil, err } @@ -850,7 +851,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { if err != nil { return nil, err } - return f.NewObject(remote) + return f.NewObject(ctx, remote) } // Hashes returns the supported hash sets. @@ -879,7 +880,7 @@ func (o *Object) Remote() string { } // Hash returns the Md5sum of an object returning a lowercase hex string -func (o *Object) Hash(t hash.Type) (string, error) { +func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { if t != hash.MD5 { return "", hash.ErrUnsupported } @@ -976,7 +977,7 @@ func (o *Object) readMetaData() (err error) { // // It attempts to read the objects mtime and if that isn't present the // LastModified returned in the http headers -func (o *Object) ModTime() time.Time { +func (o *Object) ModTime(ctx context.Context) time.Time { if fs.Config.UseServerModTime { return o.lastModified } @@ -994,7 +995,7 @@ func (o *Object) ModTime() time.Time { } // SetModTime sets the modification time of the local fs object -func (o *Object) SetModTime(modTime time.Time) error { +func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { err := o.readMetaData() if err != nil { return err @@ -1026,7 +1027,7 @@ func (o *Object) Storable() bool { } // Open an object for read -func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { +func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { headers := fs.OpenOptionHeaders(options) _, isRanging := headers["Range"] err = o.fs.pacer.Call(func() (bool, error) { @@ -1170,16 
+1171,16 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64, // Update the object with the contents of the io.Reader, modTime and size // // The new object may have been created if an error is returned -func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { +func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { if o.fs.container == "" { return fserrors.FatalError(errors.New("container name needed in remote")) } - err := o.fs.Mkdir("") + err := o.fs.Mkdir(ctx, "") if err != nil { return err } size := src.Size() - modTime := src.ModTime() + modTime := src.ModTime(ctx) // Note whether this is a dynamic large object before starting isDynamicLargeObject, err := o.isDynamicLargeObject() @@ -1190,7 +1191,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio // Set the mtime m := swift.Metadata{} m.SetModTime(modTime) - contentType := fs.MimeType(src) + contentType := fs.MimeType(ctx, src) headers := m.ObjectHeaders() uniquePrefix := "" if size > int64(o.fs.opt.ChunkSize) || (size == -1 && !o.fs.opt.NoChunk) { @@ -1233,7 +1234,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio } // Remove an object -func (o *Object) Remove() error { +func (o *Object) Remove(ctx context.Context) error { isDynamicLargeObject, err := o.isDynamicLargeObject() if err != nil { return err @@ -1257,7 +1258,7 @@ func (o *Object) Remove() error { } // MimeType of an Object if known, "" otherwise -func (o *Object) MimeType() string { +func (o *Object) MimeType(ctx context.Context) string { return o.contentType } diff --git a/backend/union/union.go b/backend/union/union.go index 8fcd3361d..9be9676c4 100644 --- a/backend/union/union.go +++ b/backend/union/union.go @@ -1,6 +1,7 @@ package union import ( + "context" "fmt" "io" "path" @@ -89,8 +90,8 @@ func (f *Fs) Features() *fs.Features { } // Rmdir removes the 
root directory of the Fs object -func (f *Fs) Rmdir(dir string) error { - return f.wr.Rmdir(dir) +func (f *Fs) Rmdir(ctx context.Context, dir string) error { + return f.wr.Rmdir(ctx, dir) } // Hashes returns hash.HashNone to indicate remote hashing is unavailable @@ -99,8 +100,8 @@ func (f *Fs) Hashes() hash.Set { } // Mkdir makes the root directory of the Fs object -func (f *Fs) Mkdir(dir string) error { - return f.wr.Mkdir(dir) +func (f *Fs) Mkdir(ctx context.Context, dir string) error { + return f.wr.Mkdir(ctx, dir) } // Purge all files in the root and the root directory @@ -109,8 +110,8 @@ func (f *Fs) Mkdir(dir string) error { // quicker than just running Remove() on the result of List() // // Return an error if it doesn't exist -func (f *Fs) Purge() error { - return f.wr.Features().Purge() +func (f *Fs) Purge(ctx context.Context) error { + return f.wr.Features().Purge(ctx) } // Copy src to this remote using server side copy operations. @@ -122,12 +123,12 @@ func (f *Fs) Purge() error { // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy -func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { +func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { if src.Fs() != f.wr { fs.Debugf(src, "Can't copy - not same remote type") return nil, fs.ErrorCantCopy } - o, err := f.wr.Features().Copy(src, remote) + o, err := f.wr.Features().Copy(ctx, src, remote) if err != nil { return nil, err } @@ -143,12 +144,12 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantMove -func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { +func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { if src.Fs() != f.wr { fs.Debugf(src, "Can't move - not same remote type") return nil, fs.ErrorCantMove } - o, err := 
f.wr.Features().Move(src, remote) + o, err := f.wr.Features().Move(ctx, src, remote) if err != nil { return nil, err } @@ -163,13 +164,13 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { // If it isn't possible then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists -func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { +func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { srcFs, ok := src.(*Fs) if !ok { fs.Debugf(srcFs, "Can't move directory - not same remote type") return fs.ErrorCantDirMove } - return f.wr.Features().DirMove(srcFs.wr, srcRemote, dstRemote) + return f.wr.Features().DirMove(ctx, srcFs.wr, srcRemote, dstRemote) } // ChangeNotify calls the passed function with a path @@ -181,14 +182,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { // The ChangeNotify implementation must empty the channel // regularly. When the channel gets closed, the implementation // should stop polling and release resources. -func (f *Fs) ChangeNotify(fn func(string, fs.EntryType), ch <-chan time.Duration) { +func (f *Fs) ChangeNotify(ctx context.Context, fn func(string, fs.EntryType), ch <-chan time.Duration) { var remoteChans []chan time.Duration for _, remote := range f.remotes { if ChangeNotify := remote.Features().ChangeNotify; ChangeNotify != nil { ch := make(chan time.Duration) remoteChans = append(remoteChans, ch) - ChangeNotify(fn, ch) + ChangeNotify(ctx, fn, ch) } } @@ -219,8 +220,8 @@ func (f *Fs) DirCacheFlush() { // May create the object even if it returns an error - if so // will return the object and the error, otherwise will return // nil and the error -func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { - o, err := f.wr.Features().PutStream(in, src, options...) 
+func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + o, err := f.wr.Features().PutStream(ctx, in, src, options...) if err != nil { return nil, err } @@ -228,8 +229,8 @@ func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption } // About gets quota information from the Fs -func (f *Fs) About() (*fs.Usage, error) { - return f.wr.Features().About() +func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { + return f.wr.Features().About(ctx) } // Put in to the remote path with the modTime given of the given size @@ -237,8 +238,8 @@ func (f *Fs) About() (*fs.Usage, error) { // May create the object even if it returns an error - if so // will return the object and the error, otherwise will return // nil and the error -func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { - o, err := f.wr.Put(in, src, options...) +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + o, err := f.wr.Put(ctx, in, src, options...) if err != nil { return nil, err } @@ -254,11 +255,11 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs. // // This should return ErrDirNotFound if the directory isn't // found. 
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { +func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { set := make(map[string]fs.DirEntry) found := false for _, remote := range f.remotes { - var remoteEntries, err = remote.List(dir) + var remoteEntries, err = remote.List(ctx, dir) if err == fs.ErrorDirNotFound { continue } @@ -283,10 +284,10 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { } // NewObject creates a new remote union file object based on the first Object it finds (reverse remote order) -func (f *Fs) NewObject(path string) (fs.Object, error) { +func (f *Fs) NewObject(ctx context.Context, path string) (fs.Object, error) { for i := range f.remotes { var remote = f.remotes[len(f.remotes)-i-1] - var obj, err = remote.NewObject(path) + var obj, err = remote.NewObject(ctx, path) if err == fs.ErrorObjectNotFound { continue } diff --git a/backend/webdav/webdav.go b/backend/webdav/webdav.go index bc00bddea..6517d06fe 100644 --- a/backend/webdav/webdav.go +++ b/backend/webdav/webdav.go @@ -9,6 +9,7 @@ package webdav import ( "bytes" + "context" "encoding/xml" "fmt" "io" @@ -282,6 +283,7 @@ func (o *Object) filePath() string { // NewFs constructs an Fs from the path, container:path func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { + ctx := context.Background() // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) @@ -343,7 +345,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { if f.root == "." { f.root = "" } - _, err := f.NewObject(remote) + _, err := f.NewObject(ctx, remote) if err != nil { if errors.Cause(err) == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile { // File doesn't exist so return old f @@ -432,7 +434,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.Prop) (fs.Object, error) // NewObject finds the Object at remote. 
If it can't be found // it returns the error fs.ErrorObjectNotFound. -func (f *Fs) NewObject(remote string) (fs.Object, error) { +func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { return f.newObjectWithInfo(remote, nil) } @@ -558,7 +560,7 @@ func (f *Fs) listAll(dir string, directoriesOnly bool, filesOnly bool, depth str // // This should return ErrDirNotFound if the directory isn't // found. -func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { +func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { var iErr error _, err = f.listAll(dir, false, false, defaultDepth, func(remote string, isDir bool, info *api.Prop) bool { if isDir { @@ -605,19 +607,19 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje // Copy the reader in to the new object which is returned // // The new object may have been created if an error is returned -func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { - o := f.createObject(src.Remote(), src.ModTime(), src.Size()) - return o, o.Update(in, src, options...) +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + o := f.createObject(src.Remote(), src.ModTime(ctx), src.Size()) + return o, o.Update(ctx, in, src, options...) } // PutStream uploads to the remote path with the modTime given of indeterminate size -func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { - return f.Put(in, src, options...) +func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + return f.Put(ctx, in, src, options...) 
} // mkParentDir makes the parent of the native path dirPath if // necessary and any directories above that -func (f *Fs) mkParentDir(dirPath string) error { +func (f *Fs) mkParentDir(ctx context.Context, dirPath string) error { // defer log.Trace(dirPath, "")("") // chop off trailing / if it exists if strings.HasSuffix(dirPath, "/") { @@ -627,7 +629,7 @@ func (f *Fs) mkParentDir(dirPath string) error { if parent == "." { parent = "" } - return f.mkdir(parent) + return f.mkdir(ctx, parent) } // low level mkdir, only makes the directory, doesn't attempt to create parents @@ -660,13 +662,13 @@ func (f *Fs) _mkdir(dirPath string) error { } // mkdir makes the directory and parents using native paths -func (f *Fs) mkdir(dirPath string) error { +func (f *Fs) mkdir(ctx context.Context, dirPath string) error { // defer log.Trace(dirPath, "")("") err := f._mkdir(dirPath) if apiErr, ok := err.(*api.Error); ok { // parent does not exist so create it first then try again if apiErr.StatusCode == http.StatusConflict { - err = f.mkParentDir(dirPath) + err = f.mkParentDir(ctx, dirPath) if err == nil { err = f._mkdir(dirPath) } @@ -676,9 +678,9 @@ func (f *Fs) mkdir(dirPath string) error { } // Mkdir creates the directory if it doesn't exist -func (f *Fs) Mkdir(dir string) error { +func (f *Fs) Mkdir(ctx context.Context, dir string) error { dirPath := f.dirPath(dir) - return f.mkdir(dirPath) + return f.mkdir(ctx, dirPath) } // dirNotEmpty returns true if the directory exists and is not Empty @@ -723,7 +725,7 @@ func (f *Fs) purgeCheck(dir string, check bool) error { // Rmdir deletes the root folder // // Returns an error if it isn't empty -func (f *Fs) Rmdir(dir string) error { +func (f *Fs) Rmdir(ctx context.Context, dir string) error { return f.purgeCheck(dir, true) } @@ -741,7 +743,7 @@ func (f *Fs) Precision() time.Duration { // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy/fs.ErrorCantMove -func (f *Fs) 
copyOrMove(src fs.Object, remote string, method string) (fs.Object, error) { +func (f *Fs) copyOrMove(ctx context.Context, src fs.Object, remote string, method string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't copy - not same remote type") @@ -751,7 +753,7 @@ func (f *Fs) copyOrMove(src fs.Object, remote string, method string) (fs.Object, return nil, fs.ErrorCantMove } dstPath := f.filePath(remote) - err := f.mkParentDir(dstPath) + err := f.mkParentDir(ctx, dstPath) if err != nil { return nil, errors.Wrap(err, "Copy mkParentDir failed") } @@ -770,7 +772,7 @@ func (f *Fs) copyOrMove(src fs.Object, remote string, method string) (fs.Object, }, } if f.useOCMtime { - opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%f", float64(src.ModTime().UnixNano())/1E9) + opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%f", float64(src.ModTime(ctx).UnixNano())/1E9) } err = f.pacer.Call(func() (bool, error) { resp, err = f.srv.Call(&opts) @@ -779,7 +781,7 @@ func (f *Fs) copyOrMove(src fs.Object, remote string, method string) (fs.Object, if err != nil { return nil, errors.Wrap(err, "Copy call failed") } - dstObj, err := f.NewObject(remote) + dstObj, err := f.NewObject(ctx, remote) if err != nil { return nil, errors.Wrap(err, "Copy NewObject failed") } @@ -795,8 +797,8 @@ func (f *Fs) copyOrMove(src fs.Object, remote string, method string) (fs.Object, // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy -func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { - return f.copyOrMove(src, remote, "COPY") +func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { + return f.copyOrMove(ctx, src, remote, "COPY") } // Purge deletes all the files and the container @@ -804,7 +806,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { // Optional interface: Only implement this if you have a way of // deleting all the files quicker than just 
running Remove() on the // result of List() -func (f *Fs) Purge() error { +func (f *Fs) Purge(ctx context.Context) error { return f.purgeCheck("", false) } @@ -817,8 +819,8 @@ func (f *Fs) Purge() error { // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantMove -func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { - return f.copyOrMove(src, remote, "MOVE") +func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { + return f.copyOrMove(ctx, src, remote, "MOVE") } // DirMove moves src, srcRemote to this remote at dstRemote @@ -829,7 +831,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { // If it isn't possible then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists -func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { +func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { srcFs, ok := src.(*Fs) if !ok { fs.Debugf(srcFs, "Can't move directory - not same remote type") @@ -848,7 +850,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { } // Make sure the parent directory exists - err = f.mkParentDir(dstPath) + err = f.mkParentDir(ctx, dstPath) if err != nil { return errors.Wrap(err, "DirMove mkParentDir dst failed") } @@ -887,7 +889,7 @@ func (f *Fs) Hashes() hash.Set { } // About gets quota information -func (f *Fs) About() (*fs.Usage, error) { +func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { opts := rest.Opts{ Method: "PROPFIND", Path: "", @@ -949,7 +951,7 @@ func (o *Object) Remote() string { } // Hash returns the SHA1 or MD5 of an object returning a lowercase hex string -func (o *Object) Hash(t hash.Type) (string, error) { +func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { if o.fs.hasChecksums { switch t { case hash.SHA1: @@ -1002,7 +1004,7 @@ func (o *Object) readMetaData() (err error) { // // It 
attempts to read the objects mtime and if that isn't present the // LastModified returned in the http headers -func (o *Object) ModTime() time.Time { +func (o *Object) ModTime(ctx context.Context) time.Time { err := o.readMetaData() if err != nil { fs.Logf(o, "Failed to read metadata: %v", err) @@ -1012,7 +1014,7 @@ func (o *Object) ModTime() time.Time { } // SetModTime sets the modification time of the local fs object -func (o *Object) SetModTime(modTime time.Time) error { +func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { return fs.ErrorCantSetModTime } @@ -1022,7 +1024,7 @@ func (o *Object) Storable() bool { } // Open an object for read -func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { +func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { var resp *http.Response opts := rest.Opts{ Method: "GET", @@ -1044,8 +1046,8 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { // If existing is set then it updates the object rather than creating a new one // // The new object may have been created if an error is returned -func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { - err = o.fs.mkParentDir(o.filePath()) +func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { + err = o.fs.mkParentDir(ctx, o.filePath()) if err != nil { return errors.Wrap(err, "Update mkParentDir failed") } @@ -1058,21 +1060,21 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio Body: in, NoResponse: true, ContentLength: &size, // FIXME this isn't necessary with owncloud - See https://github.com/nextcloud/nextcloud-snap/issues/365 - ContentType: fs.MimeType(src), + ContentType: fs.MimeType(ctx, src), } if o.fs.useOCMtime || o.fs.hasChecksums { opts.ExtraHeaders = map[string]string{} if o.fs.useOCMtime { - 
opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%f", float64(src.ModTime().UnixNano())/1E9) + opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%f", float64(src.ModTime(ctx).UnixNano())/1E9) } if o.fs.hasChecksums { // Set an upload checksum - prefer SHA1 // // This is used as an upload integrity test. If we set // only SHA1 here, owncloud will calculate the MD5 too. - if sha1, _ := src.Hash(hash.SHA1); sha1 != "" { + if sha1, _ := src.Hash(ctx, hash.SHA1); sha1 != "" { opts.ExtraHeaders["OC-Checksum"] = "SHA1:" + sha1 - } else if md5, _ := src.Hash(hash.MD5); md5 != "" { + } else if md5, _ := src.Hash(ctx, hash.MD5); md5 != "" { opts.ExtraHeaders["OC-Checksum"] = "MD5:" + md5 } } @@ -1089,7 +1091,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio // finished - ncw time.Sleep(1 * time.Second) // Remove failed upload - _ = o.Remove() + _ = o.Remove(ctx) return err } // read metadata from remote @@ -1098,7 +1100,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio } // Remove an object -func (o *Object) Remove() error { +func (o *Object) Remove(ctx context.Context) error { opts := rest.Opts{ Method: "DELETE", Path: o.filePath(), diff --git a/backend/yandex/yandex.go b/backend/yandex/yandex.go index 864cb25f4..fc6d74924 100644 --- a/backend/yandex/yandex.go +++ b/backend/yandex/yandex.go @@ -1,6 +1,7 @@ package yandex import ( + "context" "encoding/json" "fmt" "io" @@ -330,7 +331,7 @@ func (f *Fs) itemToDirEntry(remote string, object *api.ResourceInfoResponse) (fs // // This should return ErrDirNotFound if the directory isn't // found. 
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { +func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { root := f.dirPath(dir) var limit uint64 = 1000 // max number of objects per request @@ -410,7 +411,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.ResourceInfoResponse) (f // NewObject finds the Object at remote. If it can't be found it // returns the error fs.ErrorObjectNotFound. -func (f *Fs) NewObject(remote string) (fs.Object, error) { +func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { return f.newObjectWithInfo(remote, nil) } @@ -434,14 +435,14 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje // Copy the reader in to the new object which is returned // // The new object may have been created if an error is returned -func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { - o := f.createObject(src.Remote(), src.ModTime(), src.Size()) - return o, o.Update(in, src, options...) +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + o := f.createObject(src.Remote(), src.ModTime(ctx), src.Size()) + return o, o.Update(ctx, in, src, options...) } // PutStream uploads to the remote path with the modTime given of indeterminate size -func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { - return f.Put(in, src, options...) +func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + return f.Put(ctx, in, src, options...) 
} // CreateDir makes a directory @@ -518,7 +519,7 @@ func (f *Fs) mkParentDirs(resPath string) error { } // Mkdir creates the container if it doesn't exist -func (f *Fs) Mkdir(dir string) error { +func (f *Fs) Mkdir(ctx context.Context, dir string) error { path := f.filePath(dir) return f.mkDirs(path) } @@ -621,7 +622,7 @@ func (f *Fs) purgeCheck(dir string, check bool) error { // Rmdir deletes the container // // Returns an error if it isn't empty -func (f *Fs) Rmdir(dir string) error { +func (f *Fs) Rmdir(ctx context.Context, dir string) error { return f.purgeCheck(dir, true) } @@ -630,7 +631,7 @@ func (f *Fs) Rmdir(dir string) error { // Optional interface: Only implement this if you have a way of // deleting all the files quicker than just running Remove() on the // result of List() -func (f *Fs) Purge() error { +func (f *Fs) Purge(ctx context.Context) error { return f.purgeCheck("", false) } @@ -681,7 +682,7 @@ func (f *Fs) copyOrMove(method, src, dst string, overwrite bool) (err error) { // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy -func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { +func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't copy - not same remote type") @@ -699,7 +700,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { return nil, errors.Wrap(err, "couldn't copy file") } - return f.NewObject(remote) + return f.NewObject(ctx, remote) } // Move src to this remote using server side move operations. 
@@ -711,7 +712,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantMove -func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { +func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't move - not same remote type") @@ -729,7 +730,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { return nil, errors.Wrap(err, "couldn't move file") } - return f.NewObject(remote) + return f.NewObject(ctx, remote) } // DirMove moves src, srcRemote to this remote at dstRemote @@ -740,7 +741,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { // If it isn't possible then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists -func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { +func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { srcFs, ok := src.(*Fs) if !ok { fs.Debugf(srcFs, "Can't move directory - not same remote type") @@ -783,7 +784,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { } // PublicLink generates a public link to the remote path (usually readable by anyone) -func (f *Fs) PublicLink(remote string) (link string, err error) { +func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) { var path string if f.opt.Unlink { path = "/resources/unpublish" @@ -830,7 +831,7 @@ func (f *Fs) PublicLink(remote string) (link string, err error) { } // CleanUp permanently deletes all trashed files/folders -func (f *Fs) CleanUp() (err error) { +func (f *Fs) CleanUp(ctx context.Context) (err error) { var resp *http.Response opts := rest.Opts{ Method: "DELETE", @@ -846,7 +847,7 @@ func (f *Fs) CleanUp() (err error) { } // About gets quota information -func (f *Fs) About() (*fs.Usage, 
error) { +func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { opts := rest.Opts{ Method: "GET", Path: "/", @@ -941,7 +942,7 @@ func (o *Object) readMetaData() (err error) { // // It attempts to read the objects mtime and if that isn't present the // LastModified returned in the http headers -func (o *Object) ModTime() time.Time { +func (o *Object) ModTime(ctx context.Context) time.Time { err := o.readMetaData() if err != nil { fs.Logf(o, "Failed to read metadata: %v", err) @@ -961,7 +962,7 @@ func (o *Object) Size() int64 { } // Hash returns the Md5sum of an object returning a lowercase hex string -func (o *Object) Hash(t hash.Type) (string, error) { +func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { if t != hash.MD5 { return "", hash.ErrUnsupported } @@ -998,7 +999,7 @@ func (o *Object) setCustomProperty(property string, value string) (err error) { // SetModTime sets the modification time of the local fs object // // Commits the datastore -func (o *Object) SetModTime(modTime time.Time) error { +func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { // set custom_property 'rclone_modified' of object to modTime err := o.setCustomProperty("rclone_modified", modTime.Format(time.RFC3339Nano)) if err != nil { @@ -1009,7 +1010,7 @@ func (o *Object) SetModTime(modTime time.Time) error { } // Open an object for read -func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { +func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { // prepare download var resp *http.Response var dl api.AsyncInfo @@ -1090,9 +1091,9 @@ func (o *Object) upload(in io.Reader, overwrite bool, mimeType string) (err erro // Copy the reader into the object updating modTime and size // // The new object may have been created if an error is returned -func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { +func (o *Object) Update(ctx 
context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { in1 := readers.NewCountingReader(in) - modTime := src.ModTime() + modTime := src.ModTime(ctx) remote := o.filePath() //create full path to file before upload. @@ -1102,7 +1103,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio } //upload file - err = o.upload(in1, true, fs.MimeType(src)) + err = o.upload(in1, true, fs.MimeType(ctx, src)) if err != nil { return err } @@ -1112,18 +1113,18 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio o.md5sum = "" // according to unit tests after put the md5 is empty. o.size = int64(in1.BytesRead()) // better solution o.readMetaData() ? //and set modTime of uploaded file - err = o.SetModTime(modTime) + err = o.SetModTime(ctx, modTime) return err } // Remove an object -func (o *Object) Remove() error { +func (o *Object) Remove(ctx context.Context) error { return o.fs.delete(o.filePath(), false) } // MimeType of an Object if known, "" otherwise -func (o *Object) MimeType() string { +func (o *Object) MimeType(ctx context.Context) string { return o.mimeType } diff --git a/cmd/about/about.go b/cmd/about/about.go index 4b29afa8b..ab5a52cf9 100644 --- a/cmd/about/about.go +++ b/cmd/about/about.go @@ -1,6 +1,7 @@ package about import ( + "context" "encoding/json" "fmt" "os" @@ -91,7 +92,7 @@ Use the --json flag for a computer readable output, eg if doAbout == nil { return errors.Errorf("%v doesn't support about", f) } - u, err := doAbout() + u, err := doAbout(context.Background()) if err != nil { return errors.Wrap(err, "About call failed") } diff --git a/cmd/cat/cat.go b/cmd/cat/cat.go index 4ad188021..bf2b1b6f0 100644 --- a/cmd/cat/cat.go +++ b/cmd/cat/cat.go @@ -1,6 +1,7 @@ package cat import ( + "context" "io" "io/ioutil" "log" @@ -74,7 +75,7 @@ Note that if offset is negative it will count from the end, so w = ioutil.Discard } cmd.Run(false, false, command, func() error { - return 
operations.Cat(fsrc, w, offset, count) + return operations.Cat(context.Background(), fsrc, w, offset, count) }) }, } diff --git a/cmd/check/check.go b/cmd/check/check.go index 793e0e2b6..aa95a5c69 100644 --- a/cmd/check/check.go +++ b/cmd/check/check.go @@ -1,6 +1,8 @@ package check import ( + "context" + "github.com/ncw/rclone/cmd" "github.com/ncw/rclone/fs/operations" "github.com/spf13/cobra" @@ -43,9 +45,9 @@ destination that are not in the source will not trigger an error. fsrc, fdst := cmd.NewFsSrcDst(args) cmd.Run(false, false, command, func() error { if download { - return operations.CheckDownload(fdst, fsrc, oneway) + return operations.CheckDownload(context.Background(), fdst, fsrc, oneway) } - return operations.Check(fdst, fsrc, oneway) + return operations.Check(context.Background(), fdst, fsrc, oneway) }) }, } diff --git a/cmd/cleanup/cleanup.go b/cmd/cleanup/cleanup.go index c1b894e99..cb5085022 100644 --- a/cmd/cleanup/cleanup.go +++ b/cmd/cleanup/cleanup.go @@ -1,6 +1,8 @@ package cleanup import ( + "context" + "github.com/ncw/rclone/cmd" "github.com/ncw/rclone/fs/operations" "github.com/spf13/cobra" @@ -21,7 +23,7 @@ versions. Not supported by all remotes. 
cmd.CheckArgs(1, 1, command, args) fsrc := cmd.NewFsSrc(args) cmd.Run(true, false, command, func() error { - return operations.CleanUp(fsrc) + return operations.CleanUp(context.Background(), fsrc) }) }, } diff --git a/cmd/copy/copy.go b/cmd/copy/copy.go index 325671f04..34cdb31ff 100644 --- a/cmd/copy/copy.go +++ b/cmd/copy/copy.go @@ -1,6 +1,8 @@ package copy import ( + "context" + "github.com/ncw/rclone/cmd" "github.com/ncw/rclone/fs/operations" "github.com/ncw/rclone/fs/sync" @@ -74,9 +76,9 @@ changed recently very efficiently like this: fsrc, srcFileName, fdst := cmd.NewFsSrcFileDst(args) cmd.Run(true, true, command, func() error { if srcFileName == "" { - return sync.CopyDir(fdst, fsrc, createEmptySrcDirs) + return sync.CopyDir(context.Background(), fdst, fsrc, createEmptySrcDirs) } - return operations.CopyFile(fdst, fsrc, srcFileName, srcFileName) + return operations.CopyFile(context.Background(), fdst, fsrc, srcFileName, srcFileName) }) }, } diff --git a/cmd/copyto/copyto.go b/cmd/copyto/copyto.go index 50f110a60..8d7846d86 100644 --- a/cmd/copyto/copyto.go +++ b/cmd/copyto/copyto.go @@ -1,6 +1,8 @@ package copyto import ( + "context" + "github.com/ncw/rclone/cmd" "github.com/ncw/rclone/fs/operations" "github.com/ncw/rclone/fs/sync" @@ -48,9 +50,9 @@ destination. 
fsrc, srcFileName, fdst, dstFileName := cmd.NewFsSrcDstFiles(args) cmd.Run(true, true, command, func() error { if srcFileName == "" { - return sync.CopyDir(fdst, fsrc, false) + return sync.CopyDir(context.Background(), fdst, fsrc, false) } - return operations.CopyFile(fdst, fsrc, dstFileName, srcFileName) + return operations.CopyFile(context.Background(), fdst, fsrc, dstFileName, srcFileName) }) }, } diff --git a/cmd/copyurl/copyurl.go b/cmd/copyurl/copyurl.go index d09324dd6..f21d91be4 100644 --- a/cmd/copyurl/copyurl.go +++ b/cmd/copyurl/copyurl.go @@ -1,6 +1,8 @@ package copyurl import ( + "context" + "github.com/ncw/rclone/cmd" "github.com/ncw/rclone/fs/operations" "github.com/spf13/cobra" @@ -22,7 +24,7 @@ without saving it in tmp storage. fsdst, dstFileName := cmd.NewFsDstFile(args[1:]) cmd.Run(true, true, command, func() error { - _, err := operations.CopyURL(fsdst, dstFileName, args[0]) + _, err := operations.CopyURL(context.Background(), fsdst, dstFileName, args[0]) return err }) }, diff --git a/cmd/cryptcheck/cryptcheck.go b/cmd/cryptcheck/cryptcheck.go index 1a8fcd4da..66e0a95ce 100644 --- a/cmd/cryptcheck/cryptcheck.go +++ b/cmd/cryptcheck/cryptcheck.go @@ -1,6 +1,8 @@ package cryptcheck import ( + "context" + "github.com/ncw/rclone/backend/crypt" "github.com/ncw/rclone/cmd" "github.com/ncw/rclone/fs" @@ -55,13 +57,13 @@ destination that are not in the source will not trigger an error. 
cmd.CheckArgs(2, 2, command, args) fsrc, fdst := cmd.NewFsSrcDst(args) cmd.Run(false, true, command, func() error { - return cryptCheck(fdst, fsrc) + return cryptCheck(context.Background(), fdst, fsrc) }) }, } // cryptCheck checks the integrity of a crypted remote -func cryptCheck(fdst, fsrc fs.Fs) error { +func cryptCheck(ctx context.Context, fdst, fsrc fs.Fs) error { // Check to see fcrypt is a crypt fcrypt, ok := fdst.(*crypt.Fs) if !ok { @@ -79,10 +81,10 @@ func cryptCheck(fdst, fsrc fs.Fs) error { // // it returns true if differences were found // it also returns whether it couldn't be hashed - checkIdentical := func(dst, src fs.Object) (differ bool, noHash bool) { + checkIdentical := func(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool) { cryptDst := dst.(*crypt.Object) underlyingDst := cryptDst.UnWrap() - underlyingHash, err := underlyingDst.Hash(hashType) + underlyingHash, err := underlyingDst.Hash(ctx, hashType) if err != nil { fs.CountError(err) fs.Errorf(dst, "Error reading hash from underlying %v: %v", underlyingDst, err) @@ -91,7 +93,7 @@ func cryptCheck(fdst, fsrc fs.Fs) error { if underlyingHash == "" { return false, true } - cryptHash, err := fcrypt.ComputeHash(cryptDst, src, hashType) + cryptHash, err := fcrypt.ComputeHash(ctx, cryptDst, src, hashType) if err != nil { fs.CountError(err) fs.Errorf(dst, "Error computing hash: %v", err) @@ -110,5 +112,5 @@ func cryptCheck(fdst, fsrc fs.Fs) error { return false, false } - return operations.CheckFn(fcrypt, fsrc, checkIdentical, oneway) + return operations.CheckFn(ctx, fcrypt, fsrc, checkIdentical, oneway) } diff --git a/cmd/dbhashsum/dbhashsum.go b/cmd/dbhashsum/dbhashsum.go index d49ab4161..a1835c097 100644 --- a/cmd/dbhashsum/dbhashsum.go +++ b/cmd/dbhashsum/dbhashsum.go @@ -1,6 +1,7 @@ package dbhashsum import ( + "context" "os" "github.com/ncw/rclone/cmd" @@ -25,7 +26,7 @@ The output is in the same format as md5sum and sha1sum. 
cmd.CheckArgs(1, 1, command, args) fsrc := cmd.NewFsSrc(args) cmd.Run(false, false, command, func() error { - return operations.DropboxHashSum(fsrc, os.Stdout) + return operations.DropboxHashSum(context.Background(), fsrc, os.Stdout) }) }, } diff --git a/cmd/dedupe/dedupe.go b/cmd/dedupe/dedupe.go index 1915f8745..cdce24e21 100644 --- a/cmd/dedupe/dedupe.go +++ b/cmd/dedupe/dedupe.go @@ -1,6 +1,7 @@ package dedupe import ( + "context" "log" "github.com/ncw/rclone/cmd" @@ -112,7 +113,7 @@ Or } fdst := cmd.NewFsSrc(args) cmd.Run(false, false, command, func() error { - return operations.Deduplicate(fdst, dedupeMode) + return operations.Deduplicate(context.Background(), fdst, dedupeMode) }) }, } diff --git a/cmd/delete/delete.go b/cmd/delete/delete.go index fcdc77f39..1966d3a5f 100644 --- a/cmd/delete/delete.go +++ b/cmd/delete/delete.go @@ -1,6 +1,8 @@ package delete import ( + "context" + "github.com/ncw/rclone/cmd" "github.com/ncw/rclone/fs/operations" "github.com/spf13/cobra" @@ -39,7 +41,7 @@ delete all files bigger than 100MBytes. cmd.CheckArgs(1, 1, command, args) fsrc := cmd.NewFsSrc(args) cmd.Run(true, false, command, func() error { - return operations.Delete(fsrc) + return operations.Delete(context.Background(), fsrc) }) }, } diff --git a/cmd/deletefile/deletefile.go b/cmd/deletefile/deletefile.go index 3e409cef4..710a69891 100644 --- a/cmd/deletefile/deletefile.go +++ b/cmd/deletefile/deletefile.go @@ -1,6 +1,8 @@ package deletefile import ( + "context" + "github.com/ncw/rclone/cmd" "github.com/ncw/rclone/fs/operations" "github.com/pkg/errors" @@ -26,11 +28,11 @@ it will always be removed. 
if fileName == "" { return errors.Errorf("%s is a directory or doesn't exist", args[0]) } - fileObj, err := fs.NewObject(fileName) + fileObj, err := fs.NewObject(context.Background(), fileName) if err != nil { return err } - return operations.DeleteFile(fileObj) + return operations.DeleteFile(context.Background(), fileObj) }) }, } diff --git a/cmd/hashsum/hashsum.go b/cmd/hashsum/hashsum.go index e6afcb9fd..a2fb3ed2f 100644 --- a/cmd/hashsum/hashsum.go +++ b/cmd/hashsum/hashsum.go @@ -1,6 +1,7 @@ package hashsum import ( + "context" "errors" "fmt" "os" @@ -54,7 +55,7 @@ Then } fsrc := cmd.NewFsSrc(args[1:]) cmd.Run(false, false, command, func() error { - return operations.HashLister(ht, fsrc, os.Stdout) + return operations.HashLister(context.Background(), ht, fsrc, os.Stdout) }) return nil }, diff --git a/cmd/info/info.go b/cmd/info/info.go index b4aad3444..aa7794320 100644 --- a/cmd/info/info.go +++ b/cmd/info/info.go @@ -5,6 +5,7 @@ package info import ( "bytes" + "context" "fmt" "io" "sort" @@ -61,13 +62,14 @@ a bit of go code for each one. 
for i := range args { f := cmd.NewFsDir(args[i : i+1]) cmd.Run(false, false, command, func() error { - return readInfo(f) + return readInfo(context.Background(), f) }) } }, } type results struct { + ctx context.Context f fs.Fs mu sync.Mutex stringNeedsEscaping map[string]position @@ -78,8 +80,9 @@ type results struct { canStream bool } -func newResults(f fs.Fs) *results { +func newResults(ctx context.Context, f fs.Fs) *results { return &results{ + ctx: ctx, f: f, stringNeedsEscaping: make(map[string]position), } @@ -117,7 +120,7 @@ func (r *results) Print() { func (r *results) writeFile(path string) (fs.Object, error) { contents := fstest.RandomString(50) src := object.NewStaticObjectInfo(path, time.Now(), int64(len(contents)), true, nil, r.f) - return r.f.Put(bytes.NewBufferString(contents), src) + return r.f.Put(r.ctx, bytes.NewBufferString(contents), src) } // check whether normalization is enforced and check whether it is @@ -131,11 +134,11 @@ func (r *results) checkUTF8Normalization() { return } r.canWriteUnnormalized = true - _, err = r.f.NewObject(unnormalized) + _, err = r.f.NewObject(r.ctx, unnormalized) if err == nil { r.canReadUnnormalized = true } - _, err = r.f.NewObject(normalized) + _, err = r.f.NewObject(r.ctx, normalized) if err == nil { r.canReadRenormalized = true } @@ -163,7 +166,7 @@ func (r *results) checkStringPositions(s string) { } else { fs.Infof(r.f, "Writing %s position file 0x%0X OK", pos.String(), s) } - obj, getErr := r.f.NewObject(path) + obj, getErr := r.f.NewObject(r.ctx, path) if getErr != nil { fs.Infof(r.f, "Getting %s position file 0x%0X Error: %s", pos.String(), s, getErr) } else { @@ -262,7 +265,7 @@ func (r *results) checkStreaming() { in := io.TeeReader(buf, hashIn) objIn := object.NewStaticObjectInfo("checkStreamingTest", time.Now(), -1, true, nil, r.f) - objR, err := putter(in, objIn) + objR, err := putter(r.ctx, in, objIn) if err != nil { fs.Infof(r.f, "Streamed file failed to upload (%v)", err) r.canStream = false @@ 
-272,7 +275,7 @@ func (r *results) checkStreaming() { hashes := hashIn.Sums() types := objR.Fs().Hashes().Array() for _, Hash := range types { - sum, err := objR.Hash(Hash) + sum, err := objR.Hash(r.ctx, Hash) if err != nil { fs.Infof(r.f, "Streamed file failed when getting hash %v (%v)", Hash, err) r.canStream = false @@ -292,12 +295,12 @@ func (r *results) checkStreaming() { r.canStream = true } -func readInfo(f fs.Fs) error { - err := f.Mkdir("") +func readInfo(ctx context.Context, f fs.Fs) error { + err := f.Mkdir(ctx, "") if err != nil { return errors.Wrap(err, "couldn't mkdir") } - r := newResults(f) + r := newResults(ctx, f) if checkControl { r.checkControls() } diff --git a/cmd/link/link.go b/cmd/link/link.go index 4c3fba85d..ba9b22723 100644 --- a/cmd/link/link.go +++ b/cmd/link/link.go @@ -1,6 +1,7 @@ package link import ( + "context" "fmt" "github.com/ncw/rclone/cmd" @@ -30,7 +31,7 @@ without account. cmd.CheckArgs(1, 1, command, args) fsrc, remote := cmd.NewFsFile(args[0]) cmd.Run(false, false, command, func() error { - link, err := operations.PublicLink(fsrc, remote) + link, err := operations.PublicLink(context.Background(), fsrc, remote) if err != nil { return err } diff --git a/cmd/ls/ls.go b/cmd/ls/ls.go index 662f5afdb..6e183ee44 100644 --- a/cmd/ls/ls.go +++ b/cmd/ls/ls.go @@ -1,6 +1,7 @@ package ls import ( + "context" "os" "github.com/ncw/rclone/cmd" @@ -33,7 +34,7 @@ Eg cmd.CheckArgs(1, 1, command, args) fsrc := cmd.NewFsSrc(args) cmd.Run(false, false, command, func() error { - return operations.List(fsrc, os.Stdout) + return operations.List(context.Background(), fsrc, os.Stdout) }) }, } diff --git a/cmd/lsd/lsd.go b/cmd/lsd/lsd.go index e1123fe72..7c9e8e8dc 100644 --- a/cmd/lsd/lsd.go +++ b/cmd/lsd/lsd.go @@ -1,6 +1,7 @@ package lsd import ( + "context" "os" "github.com/ncw/rclone/cmd" @@ -52,7 +53,7 @@ If you just want the directory names use "rclone lsf --dirs-only". 
} fsrc := cmd.NewFsSrc(args) cmd.Run(false, false, command, func() error { - return operations.ListDir(fsrc, os.Stdout) + return operations.ListDir(context.Background(), fsrc, os.Stdout) }) }, } diff --git a/cmd/lsf/lsf.go b/cmd/lsf/lsf.go index 1b9ef17ea..bb09cd67a 100644 --- a/cmd/lsf/lsf.go +++ b/cmd/lsf/lsf.go @@ -1,6 +1,7 @@ package lsf import ( + "context" "fmt" "io" "os" @@ -150,14 +151,14 @@ those only (without traversing the whole directory structure): if csv && !separatorFlagSupplied { separator = "," } - return Lsf(fsrc, os.Stdout) + return Lsf(context.Background(), fsrc, os.Stdout) }) }, } // Lsf lists all the objects in the path with modification time, size // and path in specific format. -func Lsf(fsrc fs.Fs, out io.Writer) error { +func Lsf(ctx context.Context, fsrc fs.Fs, out io.Writer) error { var list operations.ListFormat list.SetSeparator(separator) list.SetCSV(csv) @@ -199,7 +200,7 @@ func Lsf(fsrc fs.Fs, out io.Writer) error { } } - return operations.ListJSON(fsrc, "", &opt, func(item *operations.ListJSONItem) error { + return operations.ListJSON(ctx, fsrc, "", &opt, func(item *operations.ListJSONItem) error { _, _ = fmt.Fprintln(out, list.Format(item)) return nil }) diff --git a/cmd/lsf/lsf_test.go b/cmd/lsf/lsf_test.go index a2e90c4d1..d663e15c4 100644 --- a/cmd/lsf/lsf_test.go +++ b/cmd/lsf/lsf_test.go @@ -2,6 +2,7 @@ package lsf import ( "bytes" + "context" "testing" _ "github.com/ncw/rclone/backend/local" @@ -19,7 +20,7 @@ func TestDefaultLsf(t *testing.T) { f, err := fs.NewFs("testfiles") require.NoError(t, err) - err = Lsf(f, buf) + err = Lsf(context.Background(), f, buf) require.NoError(t, err) assert.Equal(t, `file1 file2 @@ -36,7 +37,7 @@ func TestRecurseFlag(t *testing.T) { require.NoError(t, err) recurse = true - err = Lsf(f, buf) + err = Lsf(context.Background(), f, buf) require.NoError(t, err) assert.Equal(t, `file1 file2 @@ -58,7 +59,7 @@ func TestDirSlashFlag(t *testing.T) { dirSlash = true format = "p" - err = Lsf(f, buf) + 
err = Lsf(context.Background(), f, buf) require.NoError(t, err) assert.Equal(t, `file1 file2 @@ -68,7 +69,7 @@ subdir/ buf = new(bytes.Buffer) dirSlash = false - err = Lsf(f, buf) + err = Lsf(context.Background(), f, buf) require.NoError(t, err) assert.Equal(t, `file1 file2 @@ -84,7 +85,7 @@ func TestFormat(t *testing.T) { buf := new(bytes.Buffer) format = "p" - err = Lsf(f, buf) + err = Lsf(context.Background(), f, buf) require.NoError(t, err) assert.Equal(t, `file1 file2 @@ -94,7 +95,7 @@ subdir buf = new(bytes.Buffer) format = "s" - err = Lsf(f, buf) + err = Lsf(context.Background(), f, buf) require.NoError(t, err) assert.Equal(t, `0 321 @@ -104,7 +105,7 @@ subdir buf = new(bytes.Buffer) format = "hp" - err = Lsf(f, buf) + err = Lsf(context.Background(), f, buf) require.NoError(t, err) assert.Equal(t, `d41d8cd98f00b204e9800998ecf8427e;file1 409d6c19451dd39d4a94e42d2ff2c834;file2 @@ -115,7 +116,7 @@ subdir buf = new(bytes.Buffer) format = "p" filesOnly = true - err = Lsf(f, buf) + err = Lsf(context.Background(), f, buf) require.NoError(t, err) assert.Equal(t, `file1 file2 @@ -126,7 +127,7 @@ file3 buf = new(bytes.Buffer) format = "p" dirsOnly = true - err = Lsf(f, buf) + err = Lsf(context.Background(), f, buf) require.NoError(t, err) assert.Equal(t, `subdir `, buf.String()) @@ -134,20 +135,20 @@ file3 buf = new(bytes.Buffer) format = "t" - err = Lsf(f, buf) + err = Lsf(context.Background(), f, buf) require.NoError(t, err) - items, _ := list.DirSorted(f, true, "") + items, _ := list.DirSorted(context.Background(), f, true, "") var expectedOutput string for _, item := range items { - expectedOutput += item.ModTime().Format("2006-01-02 15:04:05") + "\n" + expectedOutput += item.ModTime(context.Background()).Format("2006-01-02 15:04:05") + "\n" } assert.Equal(t, expectedOutput, buf.String()) buf = new(bytes.Buffer) format = "sp" - err = Lsf(f, buf) + err = Lsf(context.Background(), f, buf) require.NoError(t, err) assert.Equal(t, `0;file1 321;file2 @@ -164,7 +165,7 @@ 
func TestSeparator(t *testing.T) { format = "ps" buf := new(bytes.Buffer) - err = Lsf(f, buf) + err = Lsf(context.Background(), f, buf) require.NoError(t, err) assert.Equal(t, `file1;0 file2;321 @@ -174,7 +175,7 @@ subdir;-1 separator = "__SEP__" buf = new(bytes.Buffer) - err = Lsf(f, buf) + err = Lsf(context.Background(), f, buf) require.NoError(t, err) assert.Equal(t, `file1__SEP__0 file2__SEP__321 @@ -195,17 +196,17 @@ func TestWholeLsf(t *testing.T) { dirSlash = true buf := new(bytes.Buffer) - err = Lsf(f, buf) + err = Lsf(context.Background(), f, buf) require.NoError(t, err) - items, _ := list.DirSorted(f, true, "") - itemsInSubdir, _ := list.DirSorted(f, true, "subdir") + items, _ := list.DirSorted(context.Background(), f, true, "") + itemsInSubdir, _ := list.DirSorted(context.Background(), f, true, "subdir") var expectedOutput []string for _, item := range items { - expectedOutput = append(expectedOutput, item.ModTime().Format("2006-01-02 15:04:05")) + expectedOutput = append(expectedOutput, item.ModTime(context.Background()).Format("2006-01-02 15:04:05")) } for _, item := range itemsInSubdir { - expectedOutput = append(expectedOutput, item.ModTime().Format("2006-01-02 15:04:05")) + expectedOutput = append(expectedOutput, item.ModTime(context.Background()).Format("2006-01-02 15:04:05")) } assert.Equal(t, `file1_+_0_+_`+expectedOutput[0]+` diff --git a/cmd/lsjson/lsjson.go b/cmd/lsjson/lsjson.go index d74420f7c..158593615 100644 --- a/cmd/lsjson/lsjson.go +++ b/cmd/lsjson/lsjson.go @@ -1,6 +1,7 @@ package lsjson import ( + "context" "encoding/json" "fmt" "os" @@ -90,7 +91,7 @@ can be processed line by line as each item is written one to a line. 
cmd.Run(false, false, command, func() error { fmt.Println("[") first := true - err := operations.ListJSON(fsrc, "", &opt, func(item *operations.ListJSONItem) error { + err := operations.ListJSON(context.Background(), fsrc, "", &opt, func(item *operations.ListJSONItem) error { out, err := json.Marshal(item) if err != nil { return errors.Wrap(err, "failed to marshal list object") diff --git a/cmd/lsl/lsl.go b/cmd/lsl/lsl.go index 7021b8429..b945209e1 100644 --- a/cmd/lsl/lsl.go +++ b/cmd/lsl/lsl.go @@ -1,6 +1,7 @@ package lsl import ( + "context" "os" "github.com/ncw/rclone/cmd" @@ -33,7 +34,7 @@ Eg cmd.CheckArgs(1, 1, command, args) fsrc := cmd.NewFsSrc(args) cmd.Run(false, false, command, func() error { - return operations.ListLong(fsrc, os.Stdout) + return operations.ListLong(context.Background(), fsrc, os.Stdout) }) }, } diff --git a/cmd/md5sum/md5sum.go b/cmd/md5sum/md5sum.go index 6c69b3391..dd585b929 100644 --- a/cmd/md5sum/md5sum.go +++ b/cmd/md5sum/md5sum.go @@ -1,6 +1,7 @@ package md5sum import ( + "context" "os" "github.com/ncw/rclone/cmd" @@ -23,7 +24,7 @@ is in the same format as the standard md5sum tool produces. 
cmd.CheckArgs(1, 1, command, args) fsrc := cmd.NewFsSrc(args) cmd.Run(false, false, command, func() error { - return operations.Md5sum(fsrc, os.Stdout) + return operations.Md5sum(context.Background(), fsrc, os.Stdout) }) }, } diff --git a/cmd/memtest/memtest.go b/cmd/memtest/memtest.go index 606ae28d8..fa69abb1f 100644 --- a/cmd/memtest/memtest.go +++ b/cmd/memtest/memtest.go @@ -1,6 +1,7 @@ package memtest import ( + "context" "runtime" "sync" @@ -22,7 +23,8 @@ var commandDefintion = &cobra.Command{ cmd.CheckArgs(1, 1, command, args) fsrc := cmd.NewFsSrc(args) cmd.Run(false, false, command, func() error { - objects, _, err := operations.Count(fsrc) + ctx := context.Background() + objects, _, err := operations.Count(ctx, fsrc) if err != nil { return err } @@ -31,7 +33,7 @@ var commandDefintion = &cobra.Command{ runtime.GC() runtime.ReadMemStats(&before) var mu sync.Mutex - err = operations.ListFn(fsrc, func(o fs.Object) { + err = operations.ListFn(ctx, fsrc, func(o fs.Object) { mu.Lock() objs = append(objs, o) mu.Unlock() diff --git a/cmd/mkdir/mkdir.go b/cmd/mkdir/mkdir.go index 4cac2d6e2..5887fa2df 100644 --- a/cmd/mkdir/mkdir.go +++ b/cmd/mkdir/mkdir.go @@ -1,6 +1,8 @@ package mkdir import ( + "context" + "github.com/ncw/rclone/cmd" "github.com/ncw/rclone/fs/operations" "github.com/spf13/cobra" @@ -17,7 +19,7 @@ var commandDefintion = &cobra.Command{ cmd.CheckArgs(1, 1, command, args) fdst := cmd.NewFsDir(args) cmd.Run(true, false, command, func() error { - return operations.Mkdir(fdst, "") + return operations.Mkdir(context.Background(), fdst, "") }) }, } diff --git a/cmd/mountlib/mounttest/dir.go b/cmd/mountlib/mounttest/dir.go index e32e99460..1ccb363d0 100644 --- a/cmd/mountlib/mounttest/dir.go +++ b/cmd/mountlib/mounttest/dir.go @@ -1,6 +1,7 @@ package mounttest import ( + "context" "os" "testing" "time" @@ -172,7 +173,7 @@ func TestDirCacheFlush(t *testing.T) { run.readLocal(t, localDm, "") assert.Equal(t, dm, localDm, "expected vs fuse mount") - err := 
run.fremote.Mkdir("dir/subdir") + err := run.fremote.Mkdir(context.Background(), "dir/subdir") require.NoError(t, err) root, err := run.vfs.Root() @@ -208,7 +209,7 @@ func TestDirCacheFlushOnDirRename(t *testing.T) { assert.Equal(t, dm, localDm, "expected vs fuse mount") // expect remotely created directory to not show up - err := run.fremote.Mkdir("dir/subdir") + err := run.fremote.Mkdir(context.Background(), "dir/subdir") require.NoError(t, err) run.readLocal(t, localDm, "") assert.Equal(t, dm, localDm, "expected vs fuse mount") diff --git a/cmd/mountlib/mounttest/fs.go b/cmd/mountlib/mounttest/fs.go index 06a82505d..e289a0099 100644 --- a/cmd/mountlib/mounttest/fs.go +++ b/cmd/mountlib/mounttest/fs.go @@ -3,6 +3,7 @@ package mounttest import ( + "context" "flag" "fmt" "io/ioutil" @@ -119,7 +120,7 @@ func newRun() *Run { log.Fatalf("Failed to open remote %q: %v", *fstest.RemoteName, err) } - err = r.fremote.Mkdir("") + err = r.fremote.Mkdir(context.Background(), "") if err != nil { log.Fatalf("Failed to open mkdir %q: %v", *fstest.RemoteName, err) } @@ -211,7 +212,7 @@ func (r *Run) cacheMode(cacheMode vfs.CacheMode) { r.vfs.WaitForWriters(30 * time.Second) // Empty and remake the remote r.cleanRemote() - err := r.fremote.Mkdir("") + err := r.fremote.Mkdir(context.Background(), "") if err != nil { log.Fatalf("Failed to open mkdir %q: %v", *fstest.RemoteName, err) } @@ -296,7 +297,7 @@ func (r *Run) readLocal(t *testing.T, dir dirMap, filePath string) { // reads the remote tree into dir func (r *Run) readRemote(t *testing.T, dir dirMap, filepath string) { - objs, dirs, err := walk.GetAll(r.fremote, filepath, true, 1) + objs, dirs, err := walk.GetAll(context.Background(), r.fremote, filepath, true, 1) if err == fs.ErrorDirNotFound { return } diff --git a/cmd/move/move.go b/cmd/move/move.go index 15a9411d8..a74027608 100644 --- a/cmd/move/move.go +++ b/cmd/move/move.go @@ -1,6 +1,8 @@ package move import ( + "context" + "github.com/ncw/rclone/cmd" 
"github.com/ncw/rclone/fs/operations" "github.com/ncw/rclone/fs/sync" @@ -54,9 +56,9 @@ can speed transfers up greatly. fsrc, srcFileName, fdst := cmd.NewFsSrcFileDst(args) cmd.Run(true, true, command, func() error { if srcFileName == "" { - return sync.MoveDir(fdst, fsrc, deleteEmptySrcDirs, createEmptySrcDirs) + return sync.MoveDir(context.Background(), fdst, fsrc, deleteEmptySrcDirs, createEmptySrcDirs) } - return operations.MoveFile(fdst, fsrc, srcFileName, srcFileName) + return operations.MoveFile(context.Background(), fdst, fsrc, srcFileName, srcFileName) }) }, } diff --git a/cmd/moveto/moveto.go b/cmd/moveto/moveto.go index e17dac126..dbcfc5524 100644 --- a/cmd/moveto/moveto.go +++ b/cmd/moveto/moveto.go @@ -1,6 +1,8 @@ package moveto import ( + "context" + "github.com/ncw/rclone/cmd" "github.com/ncw/rclone/fs/operations" "github.com/ncw/rclone/fs/sync" @@ -52,9 +54,9 @@ transfer. cmd.Run(true, true, command, func() error { if srcFileName == "" { - return sync.MoveDir(fdst, fsrc, false, false) + return sync.MoveDir(context.Background(), fdst, fsrc, false, false) } - return operations.MoveFile(fdst, fsrc, dstFileName, srcFileName) + return operations.MoveFile(context.Background(), fdst, fsrc, dstFileName, srcFileName) }) }, } diff --git a/cmd/ncdu/ncdu.go b/cmd/ncdu/ncdu.go index 6468b787c..dcec3708c 100644 --- a/cmd/ncdu/ncdu.go +++ b/cmd/ncdu/ncdu.go @@ -5,6 +5,7 @@ package ncdu import ( + "context" "fmt" "path" "sort" @@ -423,6 +424,7 @@ func (u *UI) removeEntry(pos int) { // delete the entry at the current position func (u *UI) delete() { + ctx := context.Background() dirPos := u.sortPerm[u.dirPosMap[u.path].entry] entry := u.entries[dirPos] u.boxMenu = []string{"cancel", "confirm"} @@ -431,7 +433,7 @@ func (u *UI) delete() { if o != 1 { return "Aborted!", nil } - err := operations.DeleteFile(obj) + err := operations.DeleteFile(ctx, obj) if err != nil { return "", err } @@ -446,7 +448,7 @@ func (u *UI) delete() { if o != 1 { return "Aborted!", nil } - err 
:= operations.Purge(f, entry.String()) + err := operations.Purge(ctx, f, entry.String()) if err != nil { return "", err } @@ -636,7 +638,7 @@ func (u *UI) Show() error { // scan the disk in the background u.listing = true - rootChan, errChan, updated := scan.Scan(u.f) + rootChan, errChan, updated := scan.Scan(context.Background(), u.f) // Poll the events into a channel events := make(chan termbox.Event) diff --git a/cmd/ncdu/scan/scan.go b/cmd/ncdu/scan/scan.go index ab753256d..e6a9c20c8 100644 --- a/cmd/ncdu/scan/scan.go +++ b/cmd/ncdu/scan/scan.go @@ -2,6 +2,7 @@ package scan import ( + "context" "path" "sync" @@ -160,13 +161,13 @@ func (d *Dir) AttrI(i int) (size int64, count int64, isDir bool, readable bool) // Scan the Fs passed in, returning a root directory channel and an // error channel -func Scan(f fs.Fs) (chan *Dir, chan error, chan struct{}) { +func Scan(ctx context.Context, f fs.Fs) (chan *Dir, chan error, chan struct{}) { root := make(chan *Dir, 1) errChan := make(chan error, 1) updated := make(chan struct{}, 1) go func() { parents := map[string]*Dir{} - err := walk.Walk(f, "", false, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error { + err := walk.Walk(ctx, f, "", false, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error { if err != nil { return err // FIXME mark directory as errored instead of aborting } diff --git a/cmd/purge/purge.go b/cmd/purge/purge.go index 33c8b5eb6..0dd310f6f 100644 --- a/cmd/purge/purge.go +++ b/cmd/purge/purge.go @@ -1,6 +1,8 @@ package purge import ( + "context" + "github.com/ncw/rclone/cmd" "github.com/ncw/rclone/fs/operations" "github.com/spf13/cobra" @@ -22,7 +24,7 @@ you want to selectively delete files. 
cmd.CheckArgs(1, 1, command, args) fdst := cmd.NewFsDir(args) cmd.Run(true, false, command, func() error { - return operations.Purge(fdst, "") + return operations.Purge(context.Background(), fdst, "") }) }, } diff --git a/cmd/rc/rc.go b/cmd/rc/rc.go index 40cd23371..b0598934c 100644 --- a/cmd/rc/rc.go +++ b/cmd/rc/rc.go @@ -2,6 +2,7 @@ package rc import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -116,7 +117,7 @@ func doCall(path string, in rc.Params) (out rc.Params, err error) { if call == nil { return nil, errors.Errorf("method %q not found", path) } - return call.Fn(in) + return call.Fn(context.Background(), in) } // Do HTTP request diff --git a/cmd/rcat/rcat.go b/cmd/rcat/rcat.go index f87452c51..3177e329e 100644 --- a/cmd/rcat/rcat.go +++ b/cmd/rcat/rcat.go @@ -1,6 +1,7 @@ package rcat import ( + "context" "log" "os" "time" @@ -50,7 +51,7 @@ a lot of data, you're better off caching locally and then fdst, dstFileName := cmd.NewFsDstFile(args) cmd.Run(false, false, command, func() error { - _, err := operations.Rcat(fdst, dstFileName, os.Stdin, time.Now()) + _, err := operations.Rcat(context.Background(), fdst, dstFileName, os.Stdin, time.Now()) return err }) }, diff --git a/cmd/rmdir/rmdir.go b/cmd/rmdir/rmdir.go index 6f85cf86c..c66303081 100644 --- a/cmd/rmdir/rmdir.go +++ b/cmd/rmdir/rmdir.go @@ -1,6 +1,8 @@ package rmdir import ( + "context" + "github.com/ncw/rclone/cmd" "github.com/ncw/rclone/fs/operations" "github.com/spf13/cobra" @@ -20,7 +22,7 @@ objects in it, use purge for that.`, cmd.CheckArgs(1, 1, command, args) fdst := cmd.NewFsDir(args) cmd.Run(true, false, command, func() error { - return operations.Rmdir(fdst, "") + return operations.Rmdir(context.Background(), fdst, "") }) }, } diff --git a/cmd/rmdirs/rmdirs.go b/cmd/rmdirs/rmdirs.go index 41c0e23ee..f02e9bb19 100644 --- a/cmd/rmdirs/rmdirs.go +++ b/cmd/rmdirs/rmdirs.go @@ -1,6 +1,8 @@ package rmdir import ( + "context" + "github.com/ncw/rclone/cmd" 
"github.com/ncw/rclone/fs/operations" "github.com/spf13/cobra" @@ -32,7 +34,7 @@ empty directories in. cmd.CheckArgs(1, 1, command, args) fdst := cmd.NewFsDir(args) cmd.Run(true, false, command, func() error { - return operations.Rmdirs(fdst, "", leaveRoot) + return operations.Rmdirs(context.Background(), fdst, "", leaveRoot) }) }, } diff --git a/cmd/serve/dlna/dlna_test.go b/cmd/serve/dlna/dlna_test.go index 25091a4b6..5dd399eea 100644 --- a/cmd/serve/dlna/dlna_test.go +++ b/cmd/serve/dlna/dlna_test.go @@ -1,6 +1,7 @@ package dlna import ( + "context" "fmt" "io/ioutil" "net/http" @@ -39,7 +40,7 @@ func TestInit(t *testing.T) { config.LoadConfig() f, err := fs.NewFs("testdata/files") - l, _ := f.List("") + l, _ := f.List(context.Background(), "") fmt.Println(l) require.NoError(t, err) diff --git a/cmd/serve/ftp/ftp_test.go b/cmd/serve/ftp/ftp_test.go index 7f3514f6f..a554f1ddf 100644 --- a/cmd/serve/ftp/ftp_test.go +++ b/cmd/serve/ftp/ftp_test.go @@ -8,6 +8,7 @@ package ftp import ( + "context" "fmt" "os" "os/exec" @@ -41,7 +42,7 @@ func TestFTP(t *testing.T) { assert.NoError(t, err) defer clean() - err = fremote.Mkdir("") + err = fremote.Mkdir(context.Background(), "") assert.NoError(t, err) // Start the server diff --git a/cmd/serve/http/http.go b/cmd/serve/http/http.go index c10019480..a170f11e2 100644 --- a/cmd/serve/http/http.go +++ b/cmd/serve/http/http.go @@ -161,7 +161,7 @@ func (s *server) serveFile(w http.ResponseWriter, r *http.Request, remote string w.Header().Set("Content-Length", strconv.FormatInt(node.Size(), 10)) // Set content type - mimeType := fs.MimeType(obj) + mimeType := fs.MimeType(r.Context(), obj) if mimeType == "application/octet-stream" && path.Ext(remote) == "" { // Leave header blank so http server guesses } else { diff --git a/cmd/serve/httplib/serve/serve.go b/cmd/serve/httplib/serve/serve.go index 2241851e8..ef7b019b1 100644 --- a/cmd/serve/httplib/serve/serve.go +++ b/cmd/serve/httplib/serve/serve.go @@ -28,7 +28,7 @@ func Object(w 
http.ResponseWriter, r *http.Request, o fs.Object) { } // Set content type - mimeType := fs.MimeType(o) + mimeType := fs.MimeType(r.Context(), o) if mimeType == "application/octet-stream" && path.Ext(o.Remote()) == "" { // Leave header blank so http server guesses } else { @@ -69,7 +69,7 @@ func Object(w http.ResponseWriter, r *http.Request, o fs.Object) { } w.Header().Set("Content-Length", strconv.FormatInt(size, 10)) - file, err := o.Open(options...) + file, err := o.Open(r.Context(), options...) if err != nil { fs.Debugf(o, "Get request open error: %v", err) http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound) diff --git a/cmd/serve/restic/restic.go b/cmd/serve/restic/restic.go index c5543fcc2..530a1edb8 100644 --- a/cmd/serve/restic/restic.go +++ b/cmd/serve/restic/restic.go @@ -247,7 +247,7 @@ func (s *server) handler(w http.ResponseWriter, r *http.Request) { // get the remote func (s *server) serveObject(w http.ResponseWriter, r *http.Request, remote string) { - o, err := s.f.NewObject(remote) + o, err := s.f.NewObject(r.Context(), remote) if err != nil { fs.Debugf(remote, "%s request error: %v", r.Method, err) http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound) @@ -260,7 +260,7 @@ func (s *server) serveObject(w http.ResponseWriter, r *http.Request, remote stri func (s *server) postObject(w http.ResponseWriter, r *http.Request, remote string) { if appendOnly { // make sure the file does not exist yet - _, err := s.f.NewObject(remote) + _, err := s.f.NewObject(r.Context(), remote) if err == nil { fs.Errorf(remote, "Post request: file already exists, refusing to overwrite in append-only mode") http.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden) @@ -269,7 +269,7 @@ func (s *server) postObject(w http.ResponseWriter, r *http.Request, remote strin } } - _, err := operations.RcatSize(s.f, remote, r.Body, r.ContentLength, time.Now()) + _, err := operations.RcatSize(r.Context(), s.f, remote, r.Body, 
r.ContentLength, time.Now()) if err != nil { accounting.Stats.Error(err) fs.Errorf(remote, "Post request rcat error: %v", err) @@ -291,14 +291,14 @@ func (s *server) deleteObject(w http.ResponseWriter, r *http.Request, remote str } } - o, err := s.f.NewObject(remote) + o, err := s.f.NewObject(r.Context(), remote) if err != nil { fs.Debugf(remote, "Delete request error: %v", err) http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound) return } - if err := o.Remove(); err != nil { + if err := o.Remove(r.Context()); err != nil { fs.Errorf(remote, "Delete request remove error: %v", err) if err == fs.ErrorObjectNotFound { http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound) @@ -342,7 +342,7 @@ func (s *server) listObjects(w http.ResponseWriter, r *http.Request, remote stri ls := listItems{} // if remote supports ListR use that directly, otherwise use recursive Walk - err := walk.ListR(s.f, remote, true, -1, walk.ListObjects, func(entries fs.DirEntries) error { + err := walk.ListR(r.Context(), s.f, remote, true, -1, walk.ListObjects, func(entries fs.DirEntries) error { for _, entry := range entries { ls.add(entry) } @@ -378,7 +378,7 @@ func (s *server) createRepo(w http.ResponseWriter, r *http.Request, remote strin return } - err := s.f.Mkdir(remote) + err := s.f.Mkdir(r.Context(), remote) if err != nil { fs.Errorf(remote, "Create repo failed to Mkdir: %v", err) http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError) @@ -387,7 +387,7 @@ func (s *server) createRepo(w http.ResponseWriter, r *http.Request, remote strin for _, name := range []string{"data", "index", "keys", "locks", "snapshots"} { dirRemote := path.Join(remote, name) - err := s.f.Mkdir(dirRemote) + err := s.f.Mkdir(r.Context(), dirRemote) if err != nil { fs.Errorf(dirRemote, "Create repo failed to Mkdir: %v", err) http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError) diff --git 
a/cmd/serve/restic/restic_test.go b/cmd/serve/restic/restic_test.go index dfdb8f132..26949238b 100644 --- a/cmd/serve/restic/restic_test.go +++ b/cmd/serve/restic/restic_test.go @@ -6,6 +6,7 @@ package restic import ( + "context" "os" "os/exec" "testing" @@ -38,7 +39,7 @@ func TestRestic(t *testing.T) { assert.NoError(t, err) defer clean() - err = fremote.Mkdir("") + err = fremote.Mkdir(context.Background(), "") assert.NoError(t, err) // Start the server diff --git a/cmd/serve/sftp/connection.go b/cmd/serve/sftp/connection.go index 497e2a1fa..6943618a5 100644 --- a/cmd/serve/sftp/connection.go +++ b/cmd/serve/sftp/connection.go @@ -3,6 +3,7 @@ package sftp import ( + "context" "fmt" "io" "net" @@ -53,7 +54,7 @@ type conn struct { // execCommand implements an extrememly limited number of commands to // interoperate with the rclone sftp backend -func (c *conn) execCommand(out io.Writer, command string) (err error) { +func (c *conn) execCommand(ctx context.Context, out io.Writer, command string) (err error) { binary, args := command, "" space := strings.Index(command, " ") if space >= 0 { @@ -68,7 +69,7 @@ func (c *conn) execCommand(out io.Writer, command string) (err error) { if about == nil { return errors.New("df not supported") } - usage, err := about() + usage, err := about(ctx) if err != nil { return errors.Wrap(err, "About failed") } @@ -108,7 +109,7 @@ func (c *conn) execCommand(out io.Writer, command string) (err error) { if !ok { return errors.New("unexpected non file") } - hash, err := o.Hash(ht) + hash, err := o.Hash(ctx, ht) if err != nil { return errors.Wrap(err, "hash failed") } @@ -230,7 +231,7 @@ func (c *conn) handleChannel(newChannel ssh.NewChannel) { } } else { var rc = uint32(0) - err := c.execCommand(channel, command.Command) + err := c.execCommand(context.TODO(), channel, command.Command) if err != nil { rc = 1 _, errPrint := fmt.Fprintf(channel.Stderr(), "%v\n", err) diff --git a/cmd/serve/sftp/sftp_test.go b/cmd/serve/sftp/sftp_test.go index 
2ee9b9f83..08094e916 100644 --- a/cmd/serve/sftp/sftp_test.go +++ b/cmd/serve/sftp/sftp_test.go @@ -8,6 +8,7 @@ package sftp import ( + "context" "os" "os/exec" "strings" @@ -43,7 +44,7 @@ func TestSftp(t *testing.T) { assert.NoError(t, err) defer clean() - err = fremote.Mkdir("") + err = fremote.Mkdir(context.Background(), "") assert.NoError(t, err) opt := DefaultOpt diff --git a/cmd/serve/webdav/webdav.go b/cmd/serve/webdav/webdav.go index bee8e0c5e..e1010a073 100644 --- a/cmd/serve/webdav/webdav.go +++ b/cmd/serve/webdav/webdav.go @@ -284,7 +284,7 @@ func (fi FileInfo) ETag(ctx context.Context) (etag string, err error) { if !ok { return "", webdav.ErrNotImplemented } - hash, err := o.Hash(hashType) + hash, err := o.Hash(ctx, hashType) if err != nil || hash == "" { return "", webdav.ErrNotImplemented } @@ -302,7 +302,7 @@ func (fi FileInfo) ContentType(ctx context.Context) (contentType string, err err entry := node.DirEntry() switch x := entry.(type) { case fs.Object: - return fs.MimeType(x), nil + return fs.MimeType(ctx, x), nil case fs.Directory: return "inode/directory", nil } diff --git a/cmd/serve/webdav/webdav_test.go b/cmd/serve/webdav/webdav_test.go index c80c0b759..d18b63c84 100644 --- a/cmd/serve/webdav/webdav_test.go +++ b/cmd/serve/webdav/webdav_test.go @@ -8,6 +8,7 @@ package webdav import ( + "context" "flag" "io/ioutil" "net/http" @@ -50,7 +51,7 @@ func TestWebDav(t *testing.T) { assert.NoError(t, err) defer clean() - err = fremote.Mkdir("") + err = fremote.Mkdir(context.Background(), "") assert.NoError(t, err) // Start the server diff --git a/cmd/settier/settier.go b/cmd/settier/settier.go index 3bf8c9a0e..447f7b374 100644 --- a/cmd/settier/settier.go +++ b/cmd/settier/settier.go @@ -1,6 +1,8 @@ package settier import ( + "context" + "github.com/ncw/rclone/cmd" "github.com/ncw/rclone/fs/operations" "github.com/pkg/errors" @@ -48,7 +50,7 @@ Or just provide remote directory and all files in directory will be tiered return errors.Errorf("Remote %s 
does not support settier", fsrc.Name()) } - return operations.SetTier(fsrc, tier) + return operations.SetTier(context.Background(), fsrc, tier) }) }, } diff --git a/cmd/sha1sum/sha1sum.go b/cmd/sha1sum/sha1sum.go index 6bb048ce3..953af208c 100644 --- a/cmd/sha1sum/sha1sum.go +++ b/cmd/sha1sum/sha1sum.go @@ -1,6 +1,7 @@ package sha1sum import ( + "context" "os" "github.com/ncw/rclone/cmd" @@ -23,7 +24,7 @@ is in the same format as the standard sha1sum tool produces. cmd.CheckArgs(1, 1, command, args) fsrc := cmd.NewFsSrc(args) cmd.Run(false, false, command, func() error { - return operations.Sha1sum(fsrc, os.Stdout) + return operations.Sha1sum(context.Background(), fsrc, os.Stdout) }) }, } diff --git a/cmd/size/size.go b/cmd/size/size.go index 4297d33ea..c17b273d4 100644 --- a/cmd/size/size.go +++ b/cmd/size/size.go @@ -1,6 +1,7 @@ package size import ( + "context" "encoding/json" "fmt" "os" @@ -31,7 +32,7 @@ var commandDefinition = &cobra.Command{ Bytes int64 `json:"bytes"` } - results.Count, results.Bytes, err = operations.Count(fsrc) + results.Count, results.Bytes, err = operations.Count(context.Background(), fsrc) if err != nil { return err } diff --git a/cmd/sync/sync.go b/cmd/sync/sync.go index 1d2e80572..d840db248 100644 --- a/cmd/sync/sync.go +++ b/cmd/sync/sync.go @@ -1,6 +1,8 @@ package sync import ( + "context" + "github.com/ncw/rclone/cmd" "github.com/ncw/rclone/fs/sync" "github.com/spf13/cobra" @@ -44,7 +46,7 @@ go there. 
cmd.CheckArgs(2, 2, command, args) fsrc, fdst := cmd.NewFsSrcDst(args) cmd.Run(true, true, command, func() error { - return sync.Sync(fdst, fsrc, createEmptySrcDirs) + return sync.Sync(context.Background(), fdst, fsrc, createEmptySrcDirs) }) }, } diff --git a/cmd/touch/touch.go b/cmd/touch/touch.go index ce7c45f27..075a389e2 100644 --- a/cmd/touch/touch.go +++ b/cmd/touch/touch.go @@ -2,6 +2,7 @@ package touch import ( "bytes" + "context" "time" "github.com/ncw/rclone/cmd" @@ -33,13 +34,13 @@ var commandDefintion = &cobra.Command{ cmd.CheckArgs(1, 1, command, args) fsrc, srcFileName := cmd.NewFsDstFile(args) cmd.Run(true, false, command, func() error { - return Touch(fsrc, srcFileName) + return Touch(context.Background(), fsrc, srcFileName) }) }, } //Touch create new file or change file modification time. -func Touch(fsrc fs.Fs, srcFileName string) error { +func Touch(ctx context.Context, fsrc fs.Fs, srcFileName string) error { timeAtr := time.Now() if timeAsArgument != "" { layout := defaultLayout @@ -52,19 +53,19 @@ func Touch(fsrc fs.Fs, srcFileName string) error { } timeAtr = timeAtrFromFlags } - file, err := fsrc.NewObject(srcFileName) + file, err := fsrc.NewObject(ctx, srcFileName) if err != nil { if !notCreateNewFile { var buffer []byte src := object.NewStaticObjectInfo(srcFileName, timeAtr, int64(len(buffer)), true, nil, fsrc) - _, err = fsrc.Put(bytes.NewBuffer(buffer), src) + _, err = fsrc.Put(ctx, bytes.NewBuffer(buffer), src) if err != nil { return err } } return nil } - err = file.SetModTime(timeAtr) + err = file.SetModTime(ctx, timeAtr) if err != nil { return errors.Wrap(err, "touch: couldn't set mod time") } diff --git a/cmd/touch/touch_test.go b/cmd/touch/touch_test.go index 686e57b98..eda6b5c09 100644 --- a/cmd/touch/touch_test.go +++ b/cmd/touch/touch_test.go @@ -1,6 +1,7 @@ package touch import ( + "context" "testing" "time" @@ -34,9 +35,9 @@ func TestTouchOneFile(t *testing.T) { r := fstest.NewRun(t) defer r.Finalise() - err := Touch(r.Fremote, 
"newFile") + err := Touch(context.Background(), r.Fremote, "newFile") require.NoError(t, err) - _, err = r.Fremote.NewObject("newFile") + _, err = r.Fremote.NewObject(context.Background(), "newFile") require.NoError(t, err) } @@ -45,9 +46,9 @@ func TestTouchWithNoCreateFlag(t *testing.T) { defer r.Finalise() notCreateNewFile = true - err := Touch(r.Fremote, "newFile") + err := Touch(context.Background(), r.Fremote, "newFile") require.NoError(t, err) - _, err = r.Fremote.NewObject("newFile") + _, err = r.Fremote.NewObject(context.Background(), "newFile") require.Error(t, err) notCreateNewFile = false } @@ -58,7 +59,7 @@ func TestTouchWithTimestamp(t *testing.T) { timeAsArgument = "060102" srcFileName := "oldFile" - err := Touch(r.Fremote, srcFileName) + err := Touch(context.Background(), r.Fremote, srcFileName) require.NoError(t, err) checkFile(t, r.Fremote, srcFileName, "") } @@ -69,7 +70,7 @@ func TestTouchWithLognerTimestamp(t *testing.T) { timeAsArgument = "2006-01-02T15:04:05" srcFileName := "oldFile" - err := Touch(r.Fremote, srcFileName) + err := Touch(context.Background(), r.Fremote, srcFileName) require.NoError(t, err) checkFile(t, r.Fremote, srcFileName, "") } @@ -80,11 +81,11 @@ func TestTouchUpdateTimestamp(t *testing.T) { srcFileName := "a" content := "aaa" - file1 := r.WriteObject(srcFileName, content, t1) + file1 := r.WriteObject(context.Background(), srcFileName, content, t1) fstest.CheckItems(t, r.Fremote, file1) timeAsArgument = "121212" - err := Touch(r.Fremote, "a") + err := Touch(context.Background(), r.Fremote, "a") require.NoError(t, err) checkFile(t, r.Fremote, srcFileName, content) } @@ -95,12 +96,12 @@ func TestTouchUpdateTimestampWithCFlag(t *testing.T) { srcFileName := "a" content := "aaa" - file1 := r.WriteObject(srcFileName, content, t1) + file1 := r.WriteObject(context.Background(), srcFileName, content, t1) fstest.CheckItems(t, r.Fremote, file1) notCreateNewFile = true timeAsArgument = "121212" - err := Touch(r.Fremote, "a") + err := 
Touch(context.Background(), r.Fremote, "a") require.NoError(t, err) checkFile(t, r.Fremote, srcFileName, content) notCreateNewFile = false @@ -111,7 +112,7 @@ func TestTouchCreateMultipleDirAndFile(t *testing.T) { defer r.Finalise() longPath := "a/b/c.txt" - err := Touch(r.Fremote, longPath) + err := Touch(context.Background(), r.Fremote, longPath) require.NoError(t, err) file1 := fstest.NewItem("a/b/c.txt", "", t1) fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{file1}, []string{"a", "a/b"}, fs.ModTimeNotSupported) diff --git a/cmd/tree/tree.go b/cmd/tree/tree.go index e451df7b5..13d8ec0ab 100644 --- a/cmd/tree/tree.go +++ b/cmd/tree/tree.go @@ -1,6 +1,7 @@ package tree import ( + "context" "fmt" "io" "os" @@ -117,7 +118,7 @@ short options as they conflict with rclone's short options. // Tree lists fsrc to outFile using the Options passed in func Tree(fsrc fs.Fs, outFile io.Writer, opts *tree.Options) error { - dirs, err := walk.NewDirTree(fsrc, "", false, opts.DeepLevel) + dirs, err := walk.NewDirTree(context.Background(), fsrc, "", false, opts.DeepLevel) if err != nil { return err } @@ -165,7 +166,7 @@ func (to *FileInfo) Mode() os.FileMode { // ModTime is modification time func (to *FileInfo) ModTime() time.Time { - return to.entry.ModTime() + return to.entry.ModTime(context.Background()) } // IsDir is abbreviation for Mode().IsDir() diff --git a/fs/accounting/stats.go b/fs/accounting/stats.go index f0c18ab26..8c7e2db8f 100644 --- a/fs/accounting/stats.go +++ b/fs/accounting/stats.go @@ -2,6 +2,7 @@ package accounting import ( "bytes" + "context" "fmt" "strings" "sync" @@ -101,7 +102,7 @@ func NewStats() *StatsInfo { } // RemoteStats returns stats for rc -func (s *StatsInfo) RemoteStats(in rc.Params) (out rc.Params, err error) { +func (s *StatsInfo) RemoteStats(ctx context.Context, in rc.Params) (out rc.Params, err error) { out = make(rc.Params) s.mu.RLock() dt := time.Now().Sub(s.start) diff --git a/fs/accounting/token_bucket.go 
b/fs/accounting/token_bucket.go index e71091ecf..a59268a31 100644 --- a/fs/accounting/token_bucket.go +++ b/fs/accounting/token_bucket.go @@ -132,7 +132,7 @@ func SetBwLimit(bandwidth fs.SizeSuffix) { func init() { rc.Add(rc.Call{ Path: "core/bwlimit", - Fn: func(in rc.Params) (out rc.Params, err error) { + Fn: func(ctx context.Context, in rc.Params) (out rc.Params, err error) { ibwlimit, ok := in["rate"] if !ok { return out, errors.Errorf("parameter rate not found") diff --git a/fs/chunkedreader/chunkedreader.go b/fs/chunkedreader/chunkedreader.go index 5dcc9fd90..55cbb4f82 100644 --- a/fs/chunkedreader/chunkedreader.go +++ b/fs/chunkedreader/chunkedreader.go @@ -1,6 +1,7 @@ package chunkedreader import ( + "context" "errors" "io" "sync" @@ -19,6 +20,7 @@ var ( // // A initialChunkSize of <= 0 will disable chunked reading. type ChunkedReader struct { + ctx context.Context mu sync.Mutex // protects following fields o fs.Object // source to read from rc io.ReadCloser // reader for the current open chunk @@ -37,7 +39,7 @@ type ChunkedReader struct { // If maxChunkSize is greater than initialChunkSize, the chunk size will be // doubled after each chunk read with a maximun of maxChunkSize. 
// A Seek or RangeSeek will reset the chunk size to it's initial value -func New(o fs.Object, initialChunkSize int64, maxChunkSize int64) *ChunkedReader { +func New(ctx context.Context, o fs.Object, initialChunkSize int64, maxChunkSize int64) *ChunkedReader { if initialChunkSize <= 0 { initialChunkSize = -1 } @@ -45,6 +47,7 @@ func New(o fs.Object, initialChunkSize int64, maxChunkSize int64) *ChunkedReader maxChunkSize = initialChunkSize } return &ChunkedReader{ + ctx: ctx, o: o, offset: -1, chunkSize: initialChunkSize, @@ -129,14 +132,14 @@ func (cr *ChunkedReader) Close() error { // Seek the file - for details see io.Seeker func (cr *ChunkedReader) Seek(offset int64, whence int) (int64, error) { - return cr.RangeSeek(offset, whence, -1) + return cr.RangeSeek(context.TODO(), offset, whence, -1) } // RangeSeek the file - for details see RangeSeeker // // The specified length will only apply to the next chunk opened. // RangeSeek will not reopen the source until Read is called. -func (cr *ChunkedReader) RangeSeek(offset int64, whence int, length int64) (int64, error) { +func (cr *ChunkedReader) RangeSeek(ctx context.Context, offset int64, whence int, length int64) (int64, error) { cr.mu.Lock() defer cr.mu.Unlock() @@ -196,7 +199,7 @@ func (cr *ChunkedReader) openRange() error { } if rs, ok := cr.rc.(fs.RangeSeeker); ok { - n, err := rs.RangeSeek(offset, io.SeekStart, length) + n, err := rs.RangeSeek(cr.ctx, offset, io.SeekStart, length) if err == nil && n == offset { cr.offset = offset return nil @@ -212,12 +215,12 @@ func (cr *ChunkedReader) openRange() error { var err error if length <= 0 { if offset == 0 { - rc, err = cr.o.Open() + rc, err = cr.o.Open(cr.ctx) } else { - rc, err = cr.o.Open(&fs.RangeOption{Start: offset, End: -1}) + rc, err = cr.o.Open(cr.ctx, &fs.RangeOption{Start: offset, End: -1}) } } else { - rc, err = cr.o.Open(&fs.RangeOption{Start: offset, End: offset + length - 1}) + rc, err = cr.o.Open(cr.ctx, &fs.RangeOption{Start: offset, End: offset + 
length - 1}) } if err != nil { return err diff --git a/fs/chunkedreader/chunkedreader_test.go b/fs/chunkedreader/chunkedreader_test.go index 71a0a449e..467def510 100644 --- a/fs/chunkedreader/chunkedreader_test.go +++ b/fs/chunkedreader/chunkedreader_test.go @@ -1,6 +1,7 @@ package chunkedreader import ( + "context" "fmt" "io" "math/rand" @@ -38,13 +39,13 @@ func testRead(content []byte, mode mockobject.SeekMode) func(*testing.T) { } t.Run(fmt.Sprintf("Chunksize_%d_%d", cs, csMax), func(t *testing.T) { - cr := New(o, cs, csMax) + cr := New(context.Background(), o, cs, csMax) for _, offset := range offsets { for _, limit := range limits { what := fmt.Sprintf("offset %d, limit %d", offset, limit) - p, err := cr.RangeSeek(offset, io.SeekStart, limit) + p, err := cr.RangeSeek(context.Background(), offset, io.SeekStart, limit) if offset >= cl { require.Error(t, err, what) return @@ -78,27 +79,27 @@ func TestErrorAfterClose(t *testing.T) { o := mockobject.New("test.bin").WithContent(content, mockobject.SeekModeNone) // Close - cr := New(o, 0, 0) + cr := New(context.Background(), o, 0, 0) require.NoError(t, cr.Close()) require.Error(t, cr.Close()) // Read - cr = New(o, 0, 0) + cr = New(context.Background(), o, 0, 0) require.NoError(t, cr.Close()) var buf [1]byte _, err := cr.Read(buf[:]) require.Error(t, err) // Seek - cr = New(o, 0, 0) + cr = New(context.Background(), o, 0, 0) require.NoError(t, cr.Close()) _, err = cr.Seek(1, io.SeekCurrent) require.Error(t, err) // RangeSeek - cr = New(o, 0, 0) + cr = New(context.Background(), o, 0, 0) require.NoError(t, cr.Close()) - _, err = cr.RangeSeek(1, io.SeekCurrent, 0) + _, err = cr.RangeSeek(context.Background(), 1, io.SeekCurrent, 0) require.Error(t, err) } diff --git a/fs/config/rc.go b/fs/config/rc.go index 68e3b2b4c..eaf6ac404 100644 --- a/fs/config/rc.go +++ b/fs/config/rc.go @@ -1,6 +1,8 @@ package config import ( + "context" + "github.com/ncw/rclone/fs" "github.com/ncw/rclone/fs/rc" ) @@ -23,7 +25,7 @@ See the [config 
dump command](/commands/rclone_config_dump/) command for more in } // Return the config file dump -func rcDump(in rc.Params) (out rc.Params, err error) { +func rcDump(ctx context.Context, in rc.Params) (out rc.Params, err error) { return DumpRcBlob(), nil } @@ -43,7 +45,7 @@ See the [config dump command](/commands/rclone_config_dump/) command for more in } // Return the config file get -func rcGet(in rc.Params) (out rc.Params, err error) { +func rcGet(ctx context.Context, in rc.Params) (out rc.Params, err error) { name, err := in.GetString("name") if err != nil { return nil, err @@ -67,7 +69,7 @@ See the [listremotes command](/commands/rclone_listremotes/) command for more in } // Return the a list of remotes in the config file -func rcListRemotes(in rc.Params) (out rc.Params, err error) { +func rcListRemotes(ctx context.Context, in rc.Params) (out rc.Params, err error) { var remotes = []string{} for _, remote := range getConfigData().GetSectionList() { remotes = append(remotes, remote) @@ -94,7 +96,7 @@ See the [config providers command](/commands/rclone_config_providers/) command f } // Return the config file providers -func rcProviders(in rc.Params) (out rc.Params, err error) { +func rcProviders(ctx context.Context, in rc.Params) (out rc.Params, err error) { out = rc.Params{ "providers": fs.Registry, } @@ -111,8 +113,8 @@ func init() { rc.Add(rc.Call{ Path: "config/" + name, AuthRequired: true, - Fn: func(in rc.Params) (rc.Params, error) { - return rcConfig(in, name) + Fn: func(ctx context.Context, in rc.Params) (rc.Params, error) { + return rcConfig(ctx, in, name) }, Title: name + " the config for a remote.", Help: `This takes the following parameters @@ -126,7 +128,7 @@ See the [config ` + name + ` command](/commands/rclone_config_` + name + `/) com } // Manipulate the config file -func rcConfig(in rc.Params, what string) (out rc.Params, err error) { +func rcConfig(ctx context.Context, in rc.Params, what string) (out rc.Params, err error) { name, err := 
in.GetString("name") if err != nil { return nil, err @@ -167,7 +169,7 @@ See the [config delete command](/commands/rclone_config_delete/) command for mor } // Return the config file delete -func rcDelete(in rc.Params) (out rc.Params, err error) { +func rcDelete(ctx context.Context, in rc.Params) (out rc.Params, err error) { name, err := in.GetString("name") if err != nil { return nil, err diff --git a/fs/config/rc_test.go b/fs/config/rc_test.go index bfecbd29f..44c60bc0b 100644 --- a/fs/config/rc_test.go +++ b/fs/config/rc_test.go @@ -1,6 +1,7 @@ package config import ( + "context" "testing" _ "github.com/ncw/rclone/backend/local" @@ -24,7 +25,7 @@ func TestRc(t *testing.T) { "test_key": "sausage", }, } - out, err := call.Fn(in) + out, err := call.Fn(context.Background(), in) require.NoError(t, err) require.Nil(t, out) assert.Equal(t, "local", FileGet(testName, "type")) @@ -37,7 +38,7 @@ func TestRc(t *testing.T) { call := rc.Calls.Get("config/dump") assert.NotNil(t, call) in := rc.Params{} - out, err := call.Fn(in) + out, err := call.Fn(context.Background(), in) require.NoError(t, err) require.NotNil(t, out) @@ -54,7 +55,7 @@ func TestRc(t *testing.T) { in := rc.Params{ "name": testName, } - out, err := call.Fn(in) + out, err := call.Fn(context.Background(), in) require.NoError(t, err) require.NotNil(t, out) @@ -66,7 +67,7 @@ func TestRc(t *testing.T) { call := rc.Calls.Get("config/listremotes") assert.NotNil(t, call) in := rc.Params{} - out, err := call.Fn(in) + out, err := call.Fn(context.Background(), in) require.NoError(t, err) require.NotNil(t, out) @@ -87,7 +88,7 @@ func TestRc(t *testing.T) { "test_key2": "cabbage", }, } - out, err := call.Fn(in) + out, err := call.Fn(context.Background(), in) require.NoError(t, err) assert.Nil(t, out) @@ -106,7 +107,7 @@ func TestRc(t *testing.T) { "test_key2": "cabbage", }, } - out, err := call.Fn(in) + out, err := call.Fn(context.Background(), in) require.NoError(t, err) assert.Nil(t, out) @@ -121,7 +122,7 @@ func 
TestRc(t *testing.T) { in = rc.Params{ "name": testName, } - out, err = call.Fn(in) + out, err = call.Fn(context.Background(), in) require.NoError(t, err) assert.Nil(t, out) assert.Equal(t, "", FileGet(testName, "type")) @@ -132,7 +133,7 @@ func TestRcProviders(t *testing.T) { call := rc.Calls.Get("config/providers") assert.NotNil(t, call) in := rc.Params{} - out, err := call.Fn(in) + out, err := call.Fn(context.Background(), in) require.NoError(t, err) require.NotNil(t, out) var registry []*fs.RegInfo diff --git a/fs/dir.go b/fs/dir.go index c90c1222f..d876d89a6 100644 --- a/fs/dir.go +++ b/fs/dir.go @@ -1,6 +1,9 @@ package fs -import "time" +import ( + "context" + "time" +) // Dir describes an unspecialized directory for directory/container/bucket lists type Dir struct { @@ -22,10 +25,10 @@ func NewDir(remote string, modTime time.Time) *Dir { } // NewDirCopy creates an unspecialized copy of the Directory object passed in -func NewDirCopy(d Directory) *Dir { +func NewDirCopy(ctx context.Context, d Directory) *Dir { return &Dir{ remote: d.Remote(), - modTime: d.ModTime(), + modTime: d.ModTime(ctx), size: d.Size(), items: d.Items(), id: d.ID(), @@ -61,7 +64,7 @@ func (d *Dir) SetID(id string) *Dir { // ModTime returns the modification date of the file // It should return a best guess if one isn't available -func (d *Dir) ModTime() time.Time { +func (d *Dir) ModTime(ctx context.Context) time.Time { if !d.modTime.IsZero() { return d.modTime } diff --git a/fs/filter/filter.go b/fs/filter/filter.go index a5d58cf0c..b98356a21 100644 --- a/fs/filter/filter.go +++ b/fs/filter/filter.go @@ -3,6 +3,7 @@ package filter import ( "bufio" + "context" "fmt" "log" "os" @@ -399,12 +400,12 @@ func (f *Filter) ListContainsExcludeFile(entries fs.DirEntries) bool { // IncludeDirectory returns a function which checks whether this // directory should be included in the sync or not. 
-func (f *Filter) IncludeDirectory(fs fs.Fs) func(string) (bool, error) { +func (f *Filter) IncludeDirectory(ctx context.Context, fs fs.Fs) func(string) (bool, error) { return func(remote string) (bool, error) { remote = strings.Trim(remote, "/") // first check if we need to remove directory based on // the exclude file - excl, err := f.DirContainsExcludeFile(fs, remote) + excl, err := f.DirContainsExcludeFile(ctx, fs, remote) if err != nil { return false, err } @@ -431,9 +432,9 @@ func (f *Filter) IncludeDirectory(fs fs.Fs) func(string) (bool, error) { // DirContainsExcludeFile checks if exclude file is present in a // directroy. If fs is nil, it works properly if ExcludeFile is an // empty string (for testing). -func (f *Filter) DirContainsExcludeFile(fremote fs.Fs, remote string) (bool, error) { +func (f *Filter) DirContainsExcludeFile(ctx context.Context, fremote fs.Fs, remote string) (bool, error) { if len(f.Opt.ExcludeFile) > 0 { - exists, err := fs.FileExists(fremote, path.Join(remote, f.Opt.ExcludeFile)) + exists, err := fs.FileExists(ctx, fremote, path.Join(remote, f.Opt.ExcludeFile)) if err != nil { return false, err } @@ -470,11 +471,11 @@ func (f *Filter) Include(remote string, size int64, modTime time.Time) bool { // IncludeObject returns whether this object should be included into // the sync or not. This is a convenience function to avoid calling // o.ModTime(), which is an expensive operation. 
-func (f *Filter) IncludeObject(o fs.Object) bool { +func (f *Filter) IncludeObject(ctx context.Context, o fs.Object) bool { var modTime time.Time if !f.ModTimeFrom.IsZero() || !f.ModTimeTo.IsZero() { - modTime = o.ModTime() + modTime = o.ModTime(ctx) } else { modTime = time.Unix(0, 0) } @@ -534,8 +535,8 @@ func (f *Filter) HaveFilesFrom() bool { var errFilesFromNotSet = errors.New("--files-from not set so can't use Filter.ListR") // MakeListR makes function to return all the files set using --files-from -func (f *Filter) MakeListR(NewObject func(remote string) (fs.Object, error)) fs.ListRFn { - return func(dir string, callback fs.ListRCallback) error { +func (f *Filter) MakeListR(ctx context.Context, NewObject func(ctx context.Context, remote string) (fs.Object, error)) fs.ListRFn { + return func(ctx context.Context, dir string, callback fs.ListRCallback) error { if !f.HaveFilesFrom() { return errFilesFromNotSet } @@ -547,7 +548,7 @@ func (f *Filter) MakeListR(NewObject func(remote string) (fs.Object, error)) fs. 
g.Go(func() (err error) { var entries = make(fs.DirEntries, 1) for remote := range remotes { - entries[0], err = NewObject(remote) + entries[0], err = NewObject(ctx, remote) if err == fs.ErrorObjectNotFound { // Skip files that are not found } else if err != nil { diff --git a/fs/filter/filter_test.go b/fs/filter/filter_test.go index 629165be0..0cdb820cc 100644 --- a/fs/filter/filter_test.go +++ b/fs/filter/filter_test.go @@ -1,6 +1,7 @@ package filter import ( + "context" "fmt" "io/ioutil" "os" @@ -159,7 +160,7 @@ type includeDirTest struct { func testDirInclude(t *testing.T, f *Filter, tests []includeDirTest) { for _, test := range tests { - got, err := f.IncludeDirectory(nil)(test.in) + got, err := f.IncludeDirectory(context.Background(), nil)(test.in) require.NoError(t, err) assert.Equal(t, test.want, got, test.in) } @@ -235,8 +236,8 @@ func TestNewFilterMakeListR(t *testing.T) { require.NoError(t, err) // Check error if no files - listR := f.MakeListR(nil) - err = listR("", nil) + listR := f.MakeListR(context.Background(), nil) + err = listR(context.Background(), "", nil) assert.EqualError(t, err, errFilesFromNotSet.Error()) // Add some files @@ -256,7 +257,7 @@ func TestNewFilterMakeListR(t *testing.T) { // NewObject function for MakeListR newObjects := FilesMap{} var newObjectMu sync.Mutex - NewObject := func(remote string) (fs.Object, error) { + NewObject := func(ctx context.Context, remote string) (fs.Object, error) { newObjectMu.Lock() defer newObjectMu.Unlock() if remote == "notfound" { @@ -282,8 +283,8 @@ func TestNewFilterMakeListR(t *testing.T) { } // Make the listR and call it - listR = f.MakeListR(NewObject) - err = listR("", listRcallback) + listR = f.MakeListR(context.Background(), NewObject) + err = listR(context.Background(), "", listRcallback) require.NoError(t, err) // Check that the correct objects were created and listed @@ -298,7 +299,7 @@ func TestNewFilterMakeListR(t *testing.T) { // Now check an error is returned from NewObject 
require.NoError(t, f.AddFile("error")) - err = listR("", listRcallback) + err = listR(context.Background(), "", listRcallback) require.EqualError(t, err, assert.AnError.Error()) } diff --git a/fs/fs.go b/fs/fs.go index aa055d663..19ce9d56b 100644 --- a/fs/fs.go +++ b/fs/fs.go @@ -2,6 +2,7 @@ package fs import ( + "context" "encoding/json" "fmt" "io" @@ -245,11 +246,11 @@ type Fs interface { // // This should return ErrDirNotFound if the directory isn't // found. - List(dir string) (entries DirEntries, err error) + List(ctx context.Context, dir string) (entries DirEntries, err error) // NewObject finds the Object at remote. If it can't be found // it returns the error ErrorObjectNotFound. - NewObject(remote string) (Object, error) + NewObject(ctx context.Context, remote string) (Object, error) // Put in to the remote path with the modTime given of the given size // @@ -260,17 +261,17 @@ type Fs interface { // May create the object even if it returns an error - if so // will return the object and the error, otherwise will return // nil and the error - Put(in io.Reader, src ObjectInfo, options ...OpenOption) (Object, error) + Put(ctx context.Context, in io.Reader, src ObjectInfo, options ...OpenOption) (Object, error) // Mkdir makes the directory (container, bucket) // // Shouldn't return an error if it already exists - Mkdir(dir string) error + Mkdir(ctx context.Context, dir string) error // Rmdir removes the directory (container, bucket) if empty // // Return an error if it doesn't exist or isn't empty - Rmdir(dir string) error + Rmdir(ctx context.Context, dir string) error } // Info provides a read only interface to information about a filesystem. @@ -299,20 +300,20 @@ type Object interface { ObjectInfo // SetModTime sets the metadata on the object to set the modification date - SetModTime(time.Time) error + SetModTime(ctx context.Context, t time.Time) error // Open opens the file for read. 
Call Close() on the returned io.ReadCloser - Open(options ...OpenOption) (io.ReadCloser, error) + Open(ctx context.Context, options ...OpenOption) (io.ReadCloser, error) // Update in to the object with the modTime given of the given size // // When called from outside a Fs by rclone, src.Size() will always be >= 0. // But for unknown-sized objects (indicated by src.Size() == -1), Upload should either // return an error or update the object properly (rather than e.g. calling panic). - Update(in io.Reader, src ObjectInfo, options ...OpenOption) error + Update(ctx context.Context, in io.Reader, src ObjectInfo, options ...OpenOption) error // Removes this object - Remove() error + Remove(ctx context.Context) error } // ObjectInfo provides read only information about an object. @@ -324,7 +325,7 @@ type ObjectInfo interface { // Hash returns the selected checksum of the file // If no checksum is available it returns "" - Hash(hash.Type) (string, error) + Hash(ctx context.Context, ty hash.Type) (string, error) // Storable says whether this object can be stored Storable() bool @@ -342,7 +343,7 @@ type DirEntry interface { // ModTime returns the modification date of the file // It should return a best guess if one isn't available - ModTime() time.Time + ModTime(context.Context) time.Time // Size returns the size of the file Size() int64 @@ -365,7 +366,7 @@ type Directory interface { type MimeTyper interface { // MimeType returns the content type of the Object if // known, or "" if not - MimeType() string + MimeType(ctx context.Context) string } // IDer is an optional interface for Object @@ -430,7 +431,7 @@ func ObjectOptionalInterfaces(o Object) (supported, unsupported []string) { type ListRCallback func(entries DirEntries) error // ListRFn is defines the call used to recursively list a directory -type ListRFn func(dir string, callback ListRCallback) error +type ListRFn func(ctx context.Context, dir string, callback ListRCallback) error // NewUsageValue makes a valid value 
func NewUsageValue(value int64) *int64 { @@ -476,7 +477,7 @@ type Features struct { // quicker than just running Remove() on the result of List() // // Return an error if it doesn't exist - Purge func() error + Purge func(ctx context.Context) error // Copy src to this remote using server side copy operations. // @@ -487,7 +488,7 @@ type Features struct { // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy - Copy func(src Object, remote string) (Object, error) + Copy func(ctx context.Context, src Object, remote string) (Object, error) // Move src to this remote using server side move operations. // @@ -498,7 +499,7 @@ type Features struct { // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantMove - Move func(src Object, remote string) (Object, error) + Move func(ctx context.Context, src Object, remote string) (Object, error) // DirMove moves src, srcRemote to this remote at dstRemote // using server side move operations. @@ -508,12 +509,12 @@ type Features struct { // If it isn't possible then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists - DirMove func(src Fs, srcRemote, dstRemote string) error + DirMove func(ctx context.Context, src Fs, srcRemote, dstRemote string) error // ChangeNotify calls the passed function with a path // that has had changes. If the implementation // uses polling, it should adhere to the given interval. 
- ChangeNotify func(func(string, EntryType), <-chan time.Duration) + ChangeNotify func(context.Context, func(string, EntryType), <-chan time.Duration) // UnWrap returns the Fs that this Fs is wrapping UnWrap func() Fs @@ -529,7 +530,7 @@ type Features struct { DirCacheFlush func() // PublicLink generates a public link to the remote path (usually readable by anyone) - PublicLink func(remote string) (string, error) + PublicLink func(ctx context.Context, remote string) (string, error) // Put in to the remote path with the modTime given of the given size // @@ -539,24 +540,24 @@ type Features struct { // // May create duplicates or return errors if src already // exists. - PutUnchecked func(in io.Reader, src ObjectInfo, options ...OpenOption) (Object, error) + PutUnchecked func(ctx context.Context, in io.Reader, src ObjectInfo, options ...OpenOption) (Object, error) // PutStream uploads to the remote path with the modTime given of indeterminate size // // May create the object even if it returns an error - if so // will return the object and the error, otherwise will return // nil and the error - PutStream func(in io.Reader, src ObjectInfo, options ...OpenOption) (Object, error) + PutStream func(ctx context.Context, in io.Reader, src ObjectInfo, options ...OpenOption) (Object, error) // MergeDirs merges the contents of all the directories passed // in into the first one and rmdirs the other directories. - MergeDirs func([]Directory) error + MergeDirs func(ctx context.Context, dirs []Directory) error // CleanUp the trash in the Fs // // Implement this if you have a way of emptying the trash or // otherwise cleaning up old versions of files. - CleanUp func() error + CleanUp func(ctx context.Context) error // ListR lists the objects and directories of the Fs starting // from dir recursively into out. 
@@ -577,14 +578,14 @@ type Features struct { ListR ListRFn // About gets quota information from the Fs - About func() (*Usage, error) + About func(ctx context.Context) (*Usage, error) // OpenWriterAt opens with a handle for random access writes // // Pass in the remote desired and the size if known. // // It truncates any existing object - OpenWriterAt func(remote string, size int64) (WriterAtCloser, error) + OpenWriterAt func(ctx context.Context, remote string, size int64) (WriterAtCloser, error) } // Disable nil's out the named feature. If it isn't found then it @@ -803,7 +804,7 @@ type Purger interface { // quicker than just running Remove() on the result of List() // // Return an error if it doesn't exist - Purge() error + Purge(ctx context.Context) error } // Copier is an optional interface for Fs @@ -817,7 +818,7 @@ type Copier interface { // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy - Copy(src Object, remote string) (Object, error) + Copy(ctx context.Context, src Object, remote string) (Object, error) } // Mover is an optional interface for Fs @@ -831,7 +832,7 @@ type Mover interface { // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantMove - Move(src Object, remote string) (Object, error) + Move(ctx context.Context, src Object, remote string) (Object, error) } // DirMover is an optional interface for Fs @@ -844,7 +845,7 @@ type DirMover interface { // If it isn't possible then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists - DirMove(src Fs, srcRemote, dstRemote string) error + DirMove(ctx context.Context, src Fs, srcRemote, dstRemote string) error } // ChangeNotifier is an optional interface for Fs @@ -858,7 +859,7 @@ type ChangeNotifier interface { // The ChangeNotify implementation must empty the channel // regularly. 
When the channel gets closed, the implementation // should stop polling and release resources. - ChangeNotify(func(string, EntryType), <-chan time.Duration) + ChangeNotify(context.Context, func(string, EntryType), <-chan time.Duration) } // UnWrapper is an optional interfaces for Fs @@ -892,7 +893,7 @@ type PutUncheckeder interface { // // May create duplicates or return errors if src already // exists. - PutUnchecked(in io.Reader, src ObjectInfo, options ...OpenOption) (Object, error) + PutUnchecked(ctx context.Context, in io.Reader, src ObjectInfo, options ...OpenOption) (Object, error) } // PutStreamer is an optional interface for Fs @@ -902,20 +903,20 @@ type PutStreamer interface { // May create the object even if it returns an error - if so // will return the object and the error, otherwise will return // nil and the error - PutStream(in io.Reader, src ObjectInfo, options ...OpenOption) (Object, error) + PutStream(ctx context.Context, in io.Reader, src ObjectInfo, options ...OpenOption) (Object, error) } // PublicLinker is an optional interface for Fs type PublicLinker interface { // PublicLink generates a public link to the remote path (usually readable by anyone) - PublicLink(remote string) (string, error) + PublicLink(ctx context.Context, remote string) (string, error) } // MergeDirser is an option interface for Fs type MergeDirser interface { // MergeDirs merges the contents of all the directories passed // in into the first one and rmdirs the other directories. - MergeDirs([]Directory) error + MergeDirs(ctx context.Context, dirs []Directory) error } // CleanUpper is an optional interfaces for Fs @@ -924,7 +925,7 @@ type CleanUpper interface { // // Implement this if you have a way of emptying the trash or // otherwise cleaning up old versions of files. 
- CleanUp() error + CleanUp(ctx context.Context) error } // ListRer is an optional interfaces for Fs @@ -945,7 +946,7 @@ type ListRer interface { // // Don't implement this unless you have a more efficient way // of listing recursively that doing a directory traversal. - ListR(dir string, callback ListRCallback) error + ListR(ctx context.Context, dir string, callback ListRCallback) error } // RangeSeeker is the interface that wraps the RangeSeek method. @@ -958,13 +959,13 @@ type RangeSeeker interface { // limiting the total length to limit. // // RangeSeek with a limit of < 0 is equivalent to a regular Seek. - RangeSeek(offset int64, whence int, length int64) (int64, error) + RangeSeek(ctx context.Context, offset int64, whence int, length int64) (int64, error) } // Abouter is an optional interface for Fs type Abouter interface { // About gets quota information from the Fs - About() (*Usage, error) + About(ctx context.Context) (*Usage, error) } // OpenWriterAter is an optional interface for Fs @@ -974,7 +975,7 @@ type OpenWriterAter interface { // Pass in the remote desired and the size if known. // // It truncates any existing object - OpenWriterAt(remote string, size int64) (WriterAtCloser, error) + OpenWriterAt(ctx context.Context, remote string, size int64) (WriterAtCloser, error) } // ObjectsChan is a channel of Objects @@ -1195,8 +1196,8 @@ func CheckClose(c io.Closer, err *error) { // FileExists returns true if a file remote exists. // If remote is a directory, FileExists returns false. 
-func FileExists(fs Fs, remote string) (bool, error) { - _, err := fs.NewObject(remote) +func FileExists(ctx context.Context, fs Fs, remote string) (bool, error) { + _, err := fs.NewObject(ctx, remote) if err != nil { if err == ErrorObjectNotFound || err == ErrorNotAFile || err == ErrorPermissionDenied { return false, nil diff --git a/fs/fs_test.go b/fs/fs_test.go index aaaa20502..23e29259f 100644 --- a/fs/fs_test.go +++ b/fs/fs_test.go @@ -1,6 +1,7 @@ package fs import ( + "context" "strings" "sync" "testing" @@ -17,7 +18,7 @@ import ( func TestFeaturesDisable(t *testing.T) { ft := new(Features) - ft.Copy = func(src Object, remote string) (Object, error) { + ft.Copy = func(ctx context.Context, src Object, remote string) (Object, error) { return nil, nil } ft.CaseInsensitive = true @@ -44,7 +45,7 @@ func TestFeaturesList(t *testing.T) { func TestFeaturesEnabled(t *testing.T) { ft := new(Features) ft.CaseInsensitive = true - ft.Purge = func() error { return nil } + ft.Purge = func(ctx context.Context) error { return nil } enabled := ft.Enabled() flag, ok := enabled["CaseInsensitive"] @@ -68,7 +69,7 @@ func TestFeaturesEnabled(t *testing.T) { func TestFeaturesDisableList(t *testing.T) { ft := new(Features) - ft.Copy = func(src Object, remote string) (Object, error) { + ft.Copy = func(ctx context.Context, src Object, remote string) (Object, error) { return nil, nil } ft.CaseInsensitive = true diff --git a/fs/list/list.go b/fs/list/list.go index 88b6773e0..83abdbfc2 100644 --- a/fs/list/list.go +++ b/fs/list/list.go @@ -2,6 +2,7 @@ package list import ( + "context" "sort" "strings" @@ -18,9 +19,9 @@ import ( // files and directories passing the filter will be added. 
// // Files will be returned in sorted order -func DirSorted(f fs.Fs, includeAll bool, dir string) (entries fs.DirEntries, err error) { +func DirSorted(ctx context.Context, f fs.Fs, includeAll bool, dir string) (entries fs.DirEntries, err error) { // Get unfiltered entries from the fs - entries, err = f.List(dir) + entries, err = f.List(ctx, dir) if err != nil { return nil, err } @@ -31,12 +32,12 @@ func DirSorted(f fs.Fs, includeAll bool, dir string) (entries fs.DirEntries, err fs.Debugf(dir, "Excluded") return nil, nil } - return filterAndSortDir(entries, includeAll, dir, filter.Active.IncludeObject, filter.Active.IncludeDirectory(f)) + return filterAndSortDir(ctx, entries, includeAll, dir, filter.Active.IncludeObject, filter.Active.IncludeDirectory(ctx, f)) } // filter (if required) and check the entries, then sort them -func filterAndSortDir(entries fs.DirEntries, includeAll bool, dir string, - IncludeObject func(o fs.Object) bool, +func filterAndSortDir(ctx context.Context, entries fs.DirEntries, includeAll bool, dir string, + IncludeObject func(ctx context.Context, o fs.Object) bool, IncludeDirectory func(remote string) (bool, error)) (newEntries fs.DirEntries, err error) { newEntries = entries[:0] // in place filter prefix := "" @@ -49,7 +50,7 @@ func filterAndSortDir(entries fs.DirEntries, includeAll bool, dir string, switch x := entry.(type) { case fs.Object: // Make sure we don't delete excluded files if not required - if !includeAll && !IncludeObject(x) { + if !includeAll && !IncludeObject(ctx, x) { ok = false fs.Debugf(x, "Excluded") } diff --git a/fs/list/list_test.go b/fs/list/list_test.go index f6d8bdf18..80858b32d 100644 --- a/fs/list/list_test.go +++ b/fs/list/list_test.go @@ -1,6 +1,7 @@ package list import ( + "context" "testing" "time" @@ -24,21 +25,21 @@ func TestFilterAndSortIncludeAll(t *testing.T) { dd := mockdir.New("d") oD := mockobject.Object("D") entries := fs.DirEntries{da, oA, db, oB, dc, oC, dd, oD} - includeObject := func(o 
fs.Object) bool { + includeObject := func(ctx context.Context, o fs.Object) bool { return o != oB } includeDirectory := func(remote string) (bool, error) { return remote != "c", nil } // no filter - newEntries, err := filterAndSortDir(entries, true, "", includeObject, includeDirectory) + newEntries, err := filterAndSortDir(context.Background(), entries, true, "", includeObject, includeDirectory) require.NoError(t, err) assert.Equal(t, newEntries, fs.DirEntries{oA, oB, oC, oD, da, db, dc, dd}, ) // filter - newEntries, err = filterAndSortDir(entries, false, "", includeObject, includeDirectory) + newEntries, err = filterAndSortDir(context.Background(), entries, false, "", includeObject, includeDirectory) require.NoError(t, err) assert.Equal(t, newEntries, @@ -57,7 +58,7 @@ func TestFilterAndSortCheckDir(t *testing.T) { dd := mockdir.New("dir/d") oD := mockobject.Object("dir/D") entries := fs.DirEntries{da, oA, db, oB, dc, oC, dd, oD} - newEntries, err := filterAndSortDir(entries, true, "dir", nil, nil) + newEntries, err := filterAndSortDir(context.Background(), entries, true, "dir", nil, nil) require.NoError(t, err) assert.Equal(t, newEntries, @@ -76,7 +77,7 @@ func TestFilterAndSortCheckDirRoot(t *testing.T) { dd := mockdir.New("d") oD := mockobject.Object("D") entries := fs.DirEntries{da, oA, db, oB, dc, oC, dd, oD} - newEntries, err := filterAndSortDir(entries, true, "", nil, nil) + newEntries, err := filterAndSortDir(context.Background(), entries, true, "", nil, nil) require.NoError(t, err) assert.Equal(t, newEntries, @@ -86,10 +87,10 @@ func TestFilterAndSortCheckDirRoot(t *testing.T) { type unknownDirEntry string -func (o unknownDirEntry) String() string { return string(o) } -func (o unknownDirEntry) Remote() string { return string(o) } -func (o unknownDirEntry) ModTime() (t time.Time) { return t } -func (o unknownDirEntry) Size() int64 { return 0 } +func (o unknownDirEntry) String() string { return string(o) } +func (o unknownDirEntry) Remote() string { return 
string(o) } +func (o unknownDirEntry) ModTime(ctx context.Context) (t time.Time) { return t } +func (o unknownDirEntry) Size() int64 { return 0 } func TestFilterAndSortUnknown(t *testing.T) { // Check that an unknown entry produces an error @@ -98,7 +99,7 @@ func TestFilterAndSortUnknown(t *testing.T) { ub := unknownDirEntry("b") oB := mockobject.Object("B/sub") entries := fs.DirEntries{da, oA, ub, oB} - newEntries, err := filterAndSortDir(entries, true, "", nil, nil) + newEntries, err := filterAndSortDir(context.Background(), entries, true, "", nil, nil) assert.Error(t, err, "error") assert.Nil(t, newEntries) } diff --git a/fs/march/march.go b/fs/march/march.go index 0009d6695..45eb4c9ca 100644 --- a/fs/march/march.go +++ b/fs/march/march.go @@ -40,7 +40,7 @@ type Marcher interface { // DstOnly is called for a DirEntry found only in the destination DstOnly(dst fs.DirEntry) (recurse bool) // Match is called for a DirEntry found both in the source and destination - Match(dst, src fs.DirEntry) (recurse bool) + Match(ctx context.Context, dst, src fs.DirEntry) (recurse bool) } // init sets up a march over opt.Fsrc, and opt.Fdst calling back callback for each match @@ -70,7 +70,7 @@ type listDirFn func(dir string) (entries fs.DirEntries, err error) func (m *March) makeListDir(f fs.Fs, includeAll bool) listDirFn { if (!fs.Config.UseListR || f.Features().ListR == nil) && !filter.Active.HaveFilesFrom() { return func(dir string) (entries fs.DirEntries, err error) { - return list.DirSorted(f, includeAll, dir) + return list.DirSorted(m.Ctx, f, includeAll, dir) } } var ( @@ -83,7 +83,7 @@ func (m *March) makeListDir(f fs.Fs, includeAll bool) listDirFn { mu.Lock() defer mu.Unlock() if !started { - dirs, dirsErr = walk.NewDirTree(f, m.Dir, includeAll, fs.Config.MaxDepth) + dirs, dirsErr = walk.NewDirTree(m.Ctx, f, m.Dir, includeAll, fs.Config.MaxDepth) started = true } if dirsErr != nil { @@ -383,7 +383,7 @@ func (m *March) processJob(job listDirJob) (jobs []listDirJob) { for _, 
src := range srcList { if srcObj, ok := src.(fs.Object); ok { leaf := path.Base(srcObj.Remote()) - dstObj, err := m.Fdst.NewObject(path.Join(job.dstRemote, leaf)) + dstObj, err := m.Fdst.NewObject(m.Ctx, path.Join(job.dstRemote, leaf)) if err == nil { dstList = append(dstList, dstObj) } @@ -424,7 +424,7 @@ func (m *March) processJob(job listDirJob) (jobs []listDirJob) { if m.aborting() { return nil } - recurse := m.Callback.Match(match.dst, match.src) + recurse := m.Callback.Match(m.Ctx, match.dst, match.src) if recurse && job.srcDepth > 0 && job.dstDepth > 0 { jobs = append(jobs, listDirJob{ srcRemote: match.src.Remote(), diff --git a/fs/mimetype.go b/fs/mimetype.go index 7323adf1f..50ea7dc59 100644 --- a/fs/mimetype.go +++ b/fs/mimetype.go @@ -1,6 +1,7 @@ package fs import ( + "context" "mime" "path" "strings" @@ -17,10 +18,10 @@ func MimeTypeFromName(remote string) (mimeType string) { // MimeType returns the MimeType from the object, either by calling // the MimeTyper interface or using MimeTypeFromName -func MimeType(o ObjectInfo) (mimeType string) { +func MimeType(ctx context.Context, o ObjectInfo) (mimeType string) { // Read the MimeType from the optional interface if available if do, ok := o.(MimeTyper); ok { - mimeType = do.MimeType() + mimeType = do.MimeType(ctx) // Debugf(o, "Read MimeType as %q", mimeType) if mimeType != "" { return mimeType @@ -33,10 +34,10 @@ func MimeType(o ObjectInfo) (mimeType string) { // // It returns "inode/directory" for directories, or uses // MimeType(Object) -func MimeTypeDirEntry(item DirEntry) string { +func MimeTypeDirEntry(ctx context.Context, item DirEntry) string { switch x := item.(type) { case Object: - return MimeType(x) + return MimeType(ctx, x) case Directory: return "inode/directory" } diff --git a/fs/object/object.go b/fs/object/object.go index e11827b9b..38aa463e9 100644 --- a/fs/object/object.go +++ b/fs/object/object.go @@ -3,6 +3,7 @@ package object import ( "bytes" + "context" "errors" "io" "io/ioutil" @@ 
-43,13 +44,13 @@ type staticObjectInfo struct { fs fs.Info } -func (i *staticObjectInfo) Fs() fs.Info { return i.fs } -func (i *staticObjectInfo) Remote() string { return i.remote } -func (i *staticObjectInfo) String() string { return i.remote } -func (i *staticObjectInfo) ModTime() time.Time { return i.modTime } -func (i *staticObjectInfo) Size() int64 { return i.size } -func (i *staticObjectInfo) Storable() bool { return i.storable } -func (i *staticObjectInfo) Hash(h hash.Type) (string, error) { +func (i *staticObjectInfo) Fs() fs.Info { return i.fs } +func (i *staticObjectInfo) Remote() string { return i.remote } +func (i *staticObjectInfo) String() string { return i.remote } +func (i *staticObjectInfo) ModTime(ctx context.Context) time.Time { return i.modTime } +func (i *staticObjectInfo) Size() int64 { return i.size } +func (i *staticObjectInfo) Storable() bool { return i.storable } +func (i *staticObjectInfo) Hash(ctx context.Context, h hash.Type) (string, error) { if len(i.hashes) == 0 { return "", hash.ErrUnsupported } @@ -92,13 +93,13 @@ func (memoryFs) Features() *fs.Features { return &fs.Features{} } // // This should return ErrDirNotFound if the directory isn't // found. -func (memoryFs) List(dir string) (entries fs.DirEntries, err error) { +func (memoryFs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { return nil, nil } // NewObject finds the Object at remote. If it can't be found // it returns the error ErrorObjectNotFound. 
-func (memoryFs) NewObject(remote string) (fs.Object, error) { +func (memoryFs) NewObject(ctx context.Context, remote string) (fs.Object, error) { return nil, fs.ErrorObjectNotFound } @@ -107,22 +108,22 @@ func (memoryFs) NewObject(remote string) (fs.Object, error) { // May create the object even if it returns an error - if so // will return the object and the error, otherwise will return // nil and the error -func (memoryFs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { - o := NewMemoryObject(src.Remote(), src.ModTime(), nil) - return o, o.Update(in, src, options...) +func (memoryFs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + o := NewMemoryObject(src.Remote(), src.ModTime(ctx), nil) + return o, o.Update(ctx, in, src, options...) } // Mkdir makes the directory (container, bucket) // // Shouldn't return an error if it already exists -func (memoryFs) Mkdir(dir string) error { +func (memoryFs) Mkdir(ctx context.Context, dir string) error { return errors.New("memoryFs: can't make directory") } // Rmdir removes the directory (container, bucket) if empty // // Return an error if it doesn't exist or isn't empty -func (memoryFs) Rmdir(dir string) error { +func (memoryFs) Rmdir(ctx context.Context, dir string) error { return fs.ErrorDirNotFound } @@ -165,7 +166,7 @@ func (o *MemoryObject) String() string { } // ModTime returns the modification date of the file -func (o *MemoryObject) ModTime() time.Time { +func (o *MemoryObject) ModTime(ctx context.Context) time.Time { return o.modTime } @@ -180,7 +181,7 @@ func (o *MemoryObject) Storable() bool { } // Hash returns the requested hash of the contents -func (o *MemoryObject) Hash(h hash.Type) (string, error) { +func (o *MemoryObject) Hash(ctx context.Context, h hash.Type) (string, error) { hash, err := hash.NewMultiHasherTypes(hash.Set(h)) if err != nil { return "", err @@ -193,13 +194,13 @@ func (o *MemoryObject) Hash(h 
hash.Type) (string, error) { } // SetModTime sets the metadata on the object to set the modification date -func (o *MemoryObject) SetModTime(modTime time.Time) error { +func (o *MemoryObject) SetModTime(ctx context.Context, modTime time.Time) error { o.modTime = modTime return nil } // Open opens the file for read. Call Close() on the returned io.ReadCloser -func (o *MemoryObject) Open(options ...fs.OpenOption) (io.ReadCloser, error) { +func (o *MemoryObject) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) { content := o.content for _, option := range options { switch x := option.(type) { @@ -219,7 +220,7 @@ func (o *MemoryObject) Open(options ...fs.OpenOption) (io.ReadCloser, error) { // Update in to the object with the modTime given of the given size // // This re-uses the internal buffer if at all possible. -func (o *MemoryObject) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { +func (o *MemoryObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { size := src.Size() if size == 0 { o.content = nil @@ -229,11 +230,11 @@ func (o *MemoryObject) Update(in io.Reader, src fs.ObjectInfo, options ...fs.Ope o.content = o.content[:size] _, err = io.ReadFull(in, o.content) } - o.modTime = src.ModTime() + o.modTime = src.ModTime(ctx) return err } // Remove this object -func (o *MemoryObject) Remove() error { +func (o *MemoryObject) Remove(ctx context.Context) error { return errors.New("memoryObject.Remove not supported") } diff --git a/fs/object/object_test.go b/fs/object/object_test.go index cc1a17cc1..6eb5d8714 100644 --- a/fs/object/object_test.go +++ b/fs/object/object_test.go @@ -2,6 +2,7 @@ package object_test import ( "bytes" + "context" "io" "io/ioutil" "testing" @@ -23,26 +24,26 @@ func TestStaticObject(t *testing.T) { assert.Equal(t, object.MemoryFs, o.Fs()) assert.Equal(t, remote, o.Remote()) assert.Equal(t, remote, o.String()) - assert.Equal(t, now, 
o.ModTime()) + assert.Equal(t, now, o.ModTime(context.Background())) assert.Equal(t, size, o.Size()) assert.Equal(t, true, o.Storable()) - Hash, err := o.Hash(hash.MD5) + Hash, err := o.Hash(context.Background(), hash.MD5) assert.NoError(t, err) assert.Equal(t, "", Hash) o = object.NewStaticObjectInfo(remote, now, size, true, nil, nil) - _, err = o.Hash(hash.MD5) + _, err = o.Hash(context.Background(), hash.MD5) assert.Equal(t, hash.ErrUnsupported, err) hs := map[hash.Type]string{ hash.MD5: "potato", } o = object.NewStaticObjectInfo(remote, now, size, true, hs, nil) - Hash, err = o.Hash(hash.MD5) + Hash, err = o.Hash(context.Background(), hash.MD5) assert.NoError(t, err) assert.Equal(t, "potato", Hash) - _, err = o.Hash(hash.SHA1) + _, err = o.Hash(context.Background(), hash.SHA1) assert.Equal(t, hash.ErrUnsupported, err) } @@ -55,27 +56,27 @@ func TestMemoryFs(t *testing.T) { assert.Equal(t, hash.Supported, f.Hashes()) assert.Equal(t, &fs.Features{}, f.Features()) - entries, err := f.List("") + entries, err := f.List(context.Background(), "") assert.NoError(t, err) assert.Nil(t, entries) - o, err := f.NewObject("obj") + o, err := f.NewObject(context.Background(), "obj") assert.Equal(t, fs.ErrorObjectNotFound, err) assert.Nil(t, o) buf := bytes.NewBufferString("potato") now := time.Now() src := object.NewStaticObjectInfo("remote", now, int64(buf.Len()), true, nil, nil) - o, err = f.Put(buf, src) + o, err = f.Put(context.Background(), buf, src) assert.NoError(t, err) - hash, err := o.Hash(hash.SHA1) + hash, err := o.Hash(context.Background(), hash.SHA1) assert.NoError(t, err) assert.Equal(t, "3e2e95f5ad970eadfa7e17eaf73da97024aa5359", hash) - err = f.Mkdir("dir") + err = f.Mkdir(context.Background(), "dir") assert.Error(t, err) - err = f.Rmdir("dir") + err = f.Rmdir(context.Background(), "dir") assert.Equal(t, fs.ErrorDirNotFound, err) } @@ -91,22 +92,22 @@ func TestMemoryObject(t *testing.T) { assert.Equal(t, object.MemoryFs, o.Fs()) assert.Equal(t, remote, 
o.Remote()) assert.Equal(t, remote, o.String()) - assert.Equal(t, now, o.ModTime()) + assert.Equal(t, now, o.ModTime(context.Background())) assert.Equal(t, int64(len(content)), o.Size()) assert.Equal(t, true, o.Storable()) - Hash, err := o.Hash(hash.MD5) + Hash, err := o.Hash(context.Background(), hash.MD5) assert.NoError(t, err) assert.Equal(t, "8ee2027983915ec78acc45027d874316", Hash) - Hash, err = o.Hash(hash.SHA1) + Hash, err = o.Hash(context.Background(), hash.SHA1) assert.NoError(t, err) assert.Equal(t, "3e2e95f5ad970eadfa7e17eaf73da97024aa5359", Hash) newNow := now.Add(time.Minute) - err = o.SetModTime(newNow) + err = o.SetModTime(context.Background(), newNow) assert.NoError(t, err) - assert.Equal(t, newNow, o.ModTime()) + assert.Equal(t, newNow, o.ModTime(context.Background())) checkOpen := func(rc io.ReadCloser, expected string) { actual, err := ioutil.ReadAll(rc) @@ -117,18 +118,18 @@ func TestMemoryObject(t *testing.T) { } checkContent := func(o fs.Object, expected string) { - rc, err := o.Open() + rc, err := o.Open(context.Background()) assert.NoError(t, err) checkOpen(rc, expected) } checkContent(o, string(content)) - rc, err := o.Open(&fs.RangeOption{Start: 1, End: 3}) + rc, err := o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 3}) assert.NoError(t, err) checkOpen(rc, "ot") - rc, err = o.Open(&fs.SeekOption{Offset: 3}) + rc, err = o.Open(context.Background(), &fs.SeekOption{Offset: 3}) assert.NoError(t, err) checkOpen(rc, "ato") @@ -137,10 +138,10 @@ func TestMemoryObject(t *testing.T) { newContent := bytes.NewBufferString("Rutabaga") assert.True(t, newContent.Len() < cap(content)) // fits within cap(content) src := object.NewStaticObjectInfo(remote, newNow, int64(newContent.Len()), true, nil, nil) - err = o.Update(newContent, src) + err = o.Update(context.Background(), newContent, src) assert.NoError(t, err) checkContent(o, "Rutabaga") - assert.Equal(t, newNow, o.ModTime()) + assert.Equal(t, newNow, o.ModTime(context.Background())) 
assert.Equal(t, "Rutaba", string(content)) // check we re-used the buffer // not within the buffer @@ -149,7 +150,7 @@ func TestMemoryObject(t *testing.T) { newContent = bytes.NewBufferString(newStr) assert.True(t, newContent.Len() > cap(content)) // does not fit within cap(content) src = object.NewStaticObjectInfo(remote, newNow, int64(newContent.Len()), true, nil, nil) - err = o.Update(newContent, src) + err = o.Update(context.Background(), newContent, src) assert.NoError(t, err) checkContent(o, newStr) assert.Equal(t, "Rutaba", string(content)) // check we didn't re-use the buffer @@ -158,7 +159,7 @@ func TestMemoryObject(t *testing.T) { newStr = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" newContent = bytes.NewBufferString(newStr) src = object.NewStaticObjectInfo(remote, newNow, -1, true, nil, nil) - err = o.Update(newContent, src) + err = o.Update(context.Background(), newContent, src) assert.NoError(t, err) checkContent(o, newStr) @@ -166,10 +167,10 @@ func TestMemoryObject(t *testing.T) { newStr = "" newContent = bytes.NewBufferString(newStr) src = object.NewStaticObjectInfo(remote, newNow, 0, true, nil, nil) - err = o.Update(newContent, src) + err = o.Update(context.Background(), newContent, src) assert.NoError(t, err) checkContent(o, newStr) - err = o.Remove() + err = o.Remove(context.Background()) assert.Error(t, err) } diff --git a/fs/operations/dedupe.go b/fs/operations/dedupe.go index 110e3b6b2..b57651989 100644 --- a/fs/operations/dedupe.go +++ b/fs/operations/dedupe.go @@ -3,6 +3,7 @@ package operations import ( + "context" "fmt" "log" "path" @@ -18,7 +19,7 @@ import ( ) // dedupeRename renames the objs slice to different names -func dedupeRename(f fs.Fs, remote string, objs []fs.Object) { +func dedupeRename(ctx context.Context, f fs.Fs, remote string, objs []fs.Object) { doMove := f.Features().Move if doMove == nil { log.Fatalf("Fs %v doesn't support Move", f) @@ -30,7 +31,7 @@ outer: for i, o := range objs { suffix := 1 newName := fmt.Sprintf("%s-%d%s", base, 
i+suffix, ext) - _, err := f.NewObject(newName) + _, err := f.NewObject(ctx, newName) for ; err != fs.ErrorObjectNotFound; suffix++ { if err != nil { fs.CountError(err) @@ -42,10 +43,10 @@ outer: continue outer } newName = fmt.Sprintf("%s-%d%s", base, i+suffix, ext) - _, err = f.NewObject(newName) + _, err = f.NewObject(ctx, newName) } if !fs.Config.DryRun { - newObj, err := doMove(o, newName) + newObj, err := doMove(ctx, o, newName) if err != nil { fs.CountError(err) fs.Errorf(o, "Failed to rename: %v", err) @@ -59,22 +60,22 @@ outer: } // dedupeDeleteAllButOne deletes all but the one in keep -func dedupeDeleteAllButOne(keep int, remote string, objs []fs.Object) { +func dedupeDeleteAllButOne(ctx context.Context, keep int, remote string, objs []fs.Object) { for i, o := range objs { if i == keep { continue } - _ = DeleteFile(o) + _ = DeleteFile(ctx, o) } fs.Logf(remote, "Deleted %d extra copies", len(objs)-1) } // dedupeDeleteIdentical deletes all but one of identical (by hash) copies -func dedupeDeleteIdentical(ht hash.Type, remote string, objs []fs.Object) (remainingObjs []fs.Object) { +func dedupeDeleteIdentical(ctx context.Context, ht hash.Type, remote string, objs []fs.Object) (remainingObjs []fs.Object) { // See how many of these duplicates are identical byHash := make(map[string][]fs.Object, len(objs)) for _, o := range objs { - md5sum, err := o.Hash(ht) + md5sum, err := o.Hash(ctx, ht) if err != nil || md5sum == "" { remainingObjs = append(remainingObjs, o) } else { @@ -87,7 +88,7 @@ func dedupeDeleteIdentical(ht hash.Type, remote string, objs []fs.Object) (remai if len(hashObjs) > 1 { fs.Logf(remote, "Deleting %d/%d identical duplicates (%v %q)", len(hashObjs)-1, len(hashObjs), ht, md5sum) for _, o := range hashObjs[1:] { - _ = DeleteFile(o) + _ = DeleteFile(ctx, o) } } remainingObjs = append(remainingObjs, hashObjs[0]) @@ -97,22 +98,22 @@ func dedupeDeleteIdentical(ht hash.Type, remote string, objs []fs.Object) (remai } // dedupeInteractive interactively 
dedupes the slice of objects -func dedupeInteractive(f fs.Fs, ht hash.Type, remote string, objs []fs.Object) { +func dedupeInteractive(ctx context.Context, f fs.Fs, ht hash.Type, remote string, objs []fs.Object) { fmt.Printf("%s: %d duplicates remain\n", remote, len(objs)) for i, o := range objs { - md5sum, err := o.Hash(ht) + md5sum, err := o.Hash(ctx, ht) if err != nil { md5sum = err.Error() } - fmt.Printf(" %d: %12d bytes, %s, %v %32s\n", i+1, o.Size(), o.ModTime().Local().Format("2006-01-02 15:04:05.000000000"), ht, md5sum) + fmt.Printf(" %d: %12d bytes, %s, %v %32s\n", i+1, o.Size(), o.ModTime(ctx).Local().Format("2006-01-02 15:04:05.000000000"), ht, md5sum) } switch config.Command([]string{"sSkip and do nothing", "kKeep just one (choose which in next step)", "rRename all to be different (by changing file.jpg to file-1.jpg)"}) { case 's': case 'k': keep := config.ChooseNumber("Enter the number of the file to keep", 1, len(objs)) - dedupeDeleteAllButOne(keep-1, remote, objs) + dedupeDeleteAllButOne(ctx, keep-1, remote, objs) case 'r': - dedupeRename(f, remote, objs) + dedupeRename(ctx, f, remote, objs) } } @@ -121,7 +122,7 @@ type objectsSortedByModTime []fs.Object func (objs objectsSortedByModTime) Len() int { return len(objs) } func (objs objectsSortedByModTime) Swap(i, j int) { objs[i], objs[j] = objs[j], objs[i] } func (objs objectsSortedByModTime) Less(i, j int) bool { - return objs[i].ModTime().Before(objs[j].ModTime()) + return objs[i].ModTime(context.TODO()).Before(objs[j].ModTime(context.TODO())) } // DeduplicateMode is how the dedupe command chooses what to do @@ -190,9 +191,9 @@ func (x *DeduplicateMode) Type() string { var _ pflag.Value = (*DeduplicateMode)(nil) // dedupeFindDuplicateDirs scans f for duplicate directories -func dedupeFindDuplicateDirs(f fs.Fs) ([][]fs.Directory, error) { +func dedupeFindDuplicateDirs(ctx context.Context, f fs.Fs) ([][]fs.Directory, error) { dirs := map[string][]fs.Directory{} - err := walk.ListR(f, "", true, 
fs.Config.MaxDepth, walk.ListDirs, func(entries fs.DirEntries) error { + err := walk.ListR(ctx, f, "", true, fs.Config.MaxDepth, walk.ListDirs, func(entries fs.DirEntries) error { entries.ForDir(func(d fs.Directory) { dirs[d.Remote()] = append(dirs[d.Remote()], d) }) @@ -211,7 +212,7 @@ func dedupeFindDuplicateDirs(f fs.Fs) ([][]fs.Directory, error) { } // dedupeMergeDuplicateDirs merges all the duplicate directories found -func dedupeMergeDuplicateDirs(f fs.Fs, duplicateDirs [][]fs.Directory) error { +func dedupeMergeDuplicateDirs(ctx context.Context, f fs.Fs, duplicateDirs [][]fs.Directory) error { mergeDirs := f.Features().MergeDirs if mergeDirs == nil { return errors.Errorf("%v: can't merge directories", f) @@ -223,7 +224,7 @@ func dedupeMergeDuplicateDirs(f fs.Fs, duplicateDirs [][]fs.Directory) error { for _, dirs := range duplicateDirs { if !fs.Config.DryRun { fs.Infof(dirs[0], "Merging contents of duplicate directories") - err := mergeDirs(dirs) + err := mergeDirs(ctx, dirs) if err != nil { return errors.Wrap(err, "merge duplicate dirs") } @@ -238,20 +239,20 @@ func dedupeMergeDuplicateDirs(f fs.Fs, duplicateDirs [][]fs.Directory) error { // Deduplicate interactively finds duplicate files and offers to // delete all but one or rename them to be different. Only useful with // Google Drive which can have duplicate file names. 
-func Deduplicate(f fs.Fs, mode DeduplicateMode) error { +func Deduplicate(ctx context.Context, f fs.Fs, mode DeduplicateMode) error { fs.Infof(f, "Looking for duplicates using %v mode.", mode) // Find duplicate directories first and fix them - repeat // until all fixed for { - duplicateDirs, err := dedupeFindDuplicateDirs(f) + duplicateDirs, err := dedupeFindDuplicateDirs(ctx, f) if err != nil { return err } if len(duplicateDirs) == 0 { break } - err = dedupeMergeDuplicateDirs(f, duplicateDirs) + err = dedupeMergeDuplicateDirs(ctx, f, duplicateDirs) if err != nil { return err } @@ -265,7 +266,7 @@ func Deduplicate(f fs.Fs, mode DeduplicateMode) error { // Now find duplicate files files := map[string][]fs.Object{} - err := walk.ListR(f, "", true, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error { + err := walk.ListR(ctx, f, "", true, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error { entries.ForObject(func(o fs.Object) { remote := o.Remote() files[remote] = append(files[remote], o) @@ -279,24 +280,24 @@ func Deduplicate(f fs.Fs, mode DeduplicateMode) error { for remote, objs := range files { if len(objs) > 1 { fs.Logf(remote, "Found %d duplicates - deleting identical copies", len(objs)) - objs = dedupeDeleteIdentical(ht, remote, objs) + objs = dedupeDeleteIdentical(ctx, ht, remote, objs) if len(objs) <= 1 { fs.Logf(remote, "All duplicates removed") continue } switch mode { case DeduplicateInteractive: - dedupeInteractive(f, ht, remote, objs) + dedupeInteractive(ctx, f, ht, remote, objs) case DeduplicateFirst: - dedupeDeleteAllButOne(0, remote, objs) + dedupeDeleteAllButOne(ctx, 0, remote, objs) case DeduplicateNewest: sort.Sort(objectsSortedByModTime(objs)) // sort oldest first - dedupeDeleteAllButOne(len(objs)-1, remote, objs) + dedupeDeleteAllButOne(ctx, len(objs)-1, remote, objs) case DeduplicateOldest: sort.Sort(objectsSortedByModTime(objs)) // sort oldest first - dedupeDeleteAllButOne(0, remote, objs) + 
dedupeDeleteAllButOne(ctx, 0, remote, objs) case DeduplicateRename: - dedupeRename(f, remote, objs) + dedupeRename(ctx, f, remote, objs) case DeduplicateLargest: largest, largestIndex := int64(-1), -1 for i, obj := range objs { @@ -306,7 +307,7 @@ func Deduplicate(f fs.Fs, mode DeduplicateMode) error { } } if largestIndex > -1 { - dedupeDeleteAllButOne(largestIndex, remote, objs) + dedupeDeleteAllButOne(ctx, largestIndex, remote, objs) } case DeduplicateSkip: // skip diff --git a/fs/operations/dedupe_test.go b/fs/operations/dedupe_test.go index e5807bfac..e3a4ef283 100644 --- a/fs/operations/dedupe_test.go +++ b/fs/operations/dedupe_test.go @@ -1,6 +1,7 @@ package operations_test import ( + "context" "testing" "time" @@ -37,12 +38,12 @@ func TestDeduplicateInteractive(t *testing.T) { skipIfCantDedupe(t, r.Fremote) skipIfNoHash(t, r.Fremote) - file1 := r.WriteUncheckedObject("one", "This is one", t1) - file2 := r.WriteUncheckedObject("one", "This is one", t1) - file3 := r.WriteUncheckedObject("one", "This is one", t1) + file1 := r.WriteUncheckedObject(context.Background(), "one", "This is one", t1) + file2 := r.WriteUncheckedObject(context.Background(), "one", "This is one", t1) + file3 := r.WriteUncheckedObject(context.Background(), "one", "This is one", t1) r.CheckWithDuplicates(t, file1, file2, file3) - err := operations.Deduplicate(r.Fremote, operations.DeduplicateInteractive) + err := operations.Deduplicate(context.Background(), r.Fremote, operations.DeduplicateInteractive) require.NoError(t, err) fstest.CheckItems(t, r.Fremote, file1) @@ -54,17 +55,17 @@ func TestDeduplicateSkip(t *testing.T) { skipIfCantDedupe(t, r.Fremote) haveHash := r.Fremote.Hashes().GetOne() != hash.None - file1 := r.WriteUncheckedObject("one", "This is one", t1) + file1 := r.WriteUncheckedObject(context.Background(), "one", "This is one", t1) files := []fstest.Item{file1} if haveHash { - file2 := r.WriteUncheckedObject("one", "This is one", t1) + file2 := 
r.WriteUncheckedObject(context.Background(), "one", "This is one", t1) files = append(files, file2) } - file3 := r.WriteUncheckedObject("one", "This is another one", t1) + file3 := r.WriteUncheckedObject(context.Background(), "one", "This is another one", t1) files = append(files, file3) r.CheckWithDuplicates(t, files...) - err := operations.Deduplicate(r.Fremote, operations.DeduplicateSkip) + err := operations.Deduplicate(context.Background(), r.Fremote, operations.DeduplicateSkip) require.NoError(t, err) r.CheckWithDuplicates(t, file1, file3) @@ -75,18 +76,18 @@ func TestDeduplicateFirst(t *testing.T) { defer r.Finalise() skipIfCantDedupe(t, r.Fremote) - file1 := r.WriteUncheckedObject("one", "This is one", t1) - file2 := r.WriteUncheckedObject("one", "This is one A", t1) - file3 := r.WriteUncheckedObject("one", "This is one BB", t1) + file1 := r.WriteUncheckedObject(context.Background(), "one", "This is one", t1) + file2 := r.WriteUncheckedObject(context.Background(), "one", "This is one A", t1) + file3 := r.WriteUncheckedObject(context.Background(), "one", "This is one BB", t1) r.CheckWithDuplicates(t, file1, file2, file3) - err := operations.Deduplicate(r.Fremote, operations.DeduplicateFirst) + err := operations.Deduplicate(context.Background(), r.Fremote, operations.DeduplicateFirst) require.NoError(t, err) // list until we get one object var objects, size int64 for try := 1; try <= *fstest.ListRetries; try++ { - objects, size, err = operations.Count(r.Fremote) + objects, size, err = operations.Count(context.Background(), r.Fremote) require.NoError(t, err) if objects == 1 { break @@ -104,12 +105,12 @@ func TestDeduplicateNewest(t *testing.T) { defer r.Finalise() skipIfCantDedupe(t, r.Fremote) - file1 := r.WriteUncheckedObject("one", "This is one", t1) - file2 := r.WriteUncheckedObject("one", "This is one too", t2) - file3 := r.WriteUncheckedObject("one", "This is another one", t3) + file1 := r.WriteUncheckedObject(context.Background(), "one", "This is one", 
t1) + file2 := r.WriteUncheckedObject(context.Background(), "one", "This is one too", t2) + file3 := r.WriteUncheckedObject(context.Background(), "one", "This is another one", t3) r.CheckWithDuplicates(t, file1, file2, file3) - err := operations.Deduplicate(r.Fremote, operations.DeduplicateNewest) + err := operations.Deduplicate(context.Background(), r.Fremote, operations.DeduplicateNewest) require.NoError(t, err) fstest.CheckItems(t, r.Fremote, file3) @@ -120,12 +121,12 @@ func TestDeduplicateOldest(t *testing.T) { defer r.Finalise() skipIfCantDedupe(t, r.Fremote) - file1 := r.WriteUncheckedObject("one", "This is one", t1) - file2 := r.WriteUncheckedObject("one", "This is one too", t2) - file3 := r.WriteUncheckedObject("one", "This is another one", t3) + file1 := r.WriteUncheckedObject(context.Background(), "one", "This is one", t1) + file2 := r.WriteUncheckedObject(context.Background(), "one", "This is one too", t2) + file3 := r.WriteUncheckedObject(context.Background(), "one", "This is another one", t3) r.CheckWithDuplicates(t, file1, file2, file3) - err := operations.Deduplicate(r.Fremote, operations.DeduplicateOldest) + err := operations.Deduplicate(context.Background(), r.Fremote, operations.DeduplicateOldest) require.NoError(t, err) fstest.CheckItems(t, r.Fremote, file1) @@ -136,12 +137,12 @@ func TestDeduplicateLargest(t *testing.T) { defer r.Finalise() skipIfCantDedupe(t, r.Fremote) - file1 := r.WriteUncheckedObject("one", "This is one", t1) - file2 := r.WriteUncheckedObject("one", "This is one too", t2) - file3 := r.WriteUncheckedObject("one", "This is another one", t3) + file1 := r.WriteUncheckedObject(context.Background(), "one", "This is one", t1) + file2 := r.WriteUncheckedObject(context.Background(), "one", "This is one too", t2) + file3 := r.WriteUncheckedObject(context.Background(), "one", "This is another one", t3) r.CheckWithDuplicates(t, file1, file2, file3) - err := operations.Deduplicate(r.Fremote, operations.DeduplicateLargest) + err := 
operations.Deduplicate(context.Background(), r.Fremote, operations.DeduplicateLargest) require.NoError(t, err) fstest.CheckItems(t, r.Fremote, file3) @@ -152,16 +153,16 @@ func TestDeduplicateRename(t *testing.T) { defer r.Finalise() skipIfCantDedupe(t, r.Fremote) - file1 := r.WriteUncheckedObject("one.txt", "This is one", t1) - file2 := r.WriteUncheckedObject("one.txt", "This is one too", t2) - file3 := r.WriteUncheckedObject("one.txt", "This is another one", t3) - file4 := r.WriteUncheckedObject("one-1.txt", "This is not a duplicate", t1) + file1 := r.WriteUncheckedObject(context.Background(), "one.txt", "This is one", t1) + file2 := r.WriteUncheckedObject(context.Background(), "one.txt", "This is one too", t2) + file3 := r.WriteUncheckedObject(context.Background(), "one.txt", "This is another one", t3) + file4 := r.WriteUncheckedObject(context.Background(), "one-1.txt", "This is not a duplicate", t1) r.CheckWithDuplicates(t, file1, file2, file3, file4) - err := operations.Deduplicate(r.Fremote, operations.DeduplicateRename) + err := operations.Deduplicate(context.Background(), r.Fremote, operations.DeduplicateRename) require.NoError(t, err) - require.NoError(t, walk.ListR(r.Fremote, "", true, -1, walk.ListObjects, func(entries fs.DirEntries) error { + require.NoError(t, walk.ListR(context.Background(), r.Fremote, "", true, -1, walk.ListObjects, func(entries fs.DirEntries) error { entries.ForObject(func(o fs.Object) { remote := o.Remote() if remote != "one-1.txt" && @@ -196,23 +197,23 @@ func TestMergeDirs(t *testing.T) { t.Skip("Can't merge directories") } - file1 := r.WriteObject("dupe1/one.txt", "This is one", t1) - file2 := r.WriteObject("dupe2/two.txt", "This is one too", t2) - file3 := r.WriteObject("dupe3/three.txt", "This is another one", t3) + file1 := r.WriteObject(context.Background(), "dupe1/one.txt", "This is one", t1) + file2 := r.WriteObject(context.Background(), "dupe2/two.txt", "This is one too", t2) + file3 := r.WriteObject(context.Background(), 
"dupe3/three.txt", "This is another one", t3) - objs, dirs, err := walk.GetAll(r.Fremote, "", true, 1) + objs, dirs, err := walk.GetAll(context.Background(), r.Fremote, "", true, 1) require.NoError(t, err) assert.Equal(t, 3, len(dirs)) assert.Equal(t, 0, len(objs)) - err = mergeDirs(dirs) + err = mergeDirs(context.Background(), dirs) require.NoError(t, err) file2.Path = "dupe1/two.txt" file3.Path = "dupe1/three.txt" fstest.CheckItems(t, r.Fremote, file1, file2, file3) - objs, dirs, err = walk.GetAll(r.Fremote, "", true, 1) + objs, dirs, err = walk.GetAll(context.Background(), r.Fremote, "", true, 1) require.NoError(t, err) assert.Equal(t, 1, len(dirs)) assert.Equal(t, 0, len(objs)) diff --git a/fs/operations/listdirsorted_test.go b/fs/operations/listdirsorted_test.go index 7c846393a..10b9d62ec 100644 --- a/fs/operations/listdirsorted_test.go +++ b/fs/operations/listdirsorted_test.go @@ -1,6 +1,7 @@ package operations_test import ( + "context" "testing" "github.com/ncw/rclone/fs" @@ -23,13 +24,13 @@ func TestListDirSorted(t *testing.T) { }() files := []fstest.Item{ - r.WriteObject("a.txt", "hello world", t1), - r.WriteObject("zend.txt", "hello", t1), - r.WriteObject("sub dir/hello world", "hello world", t1), - r.WriteObject("sub dir/hello world2", "hello world", t1), - r.WriteObject("sub dir/ignore dir/.ignore", "", t1), - r.WriteObject("sub dir/ignore dir/should be ignored", "to ignore", t1), - r.WriteObject("sub dir/sub sub dir/hello world3", "hello world", t1), + r.WriteObject(context.Background(), "a.txt", "hello world", t1), + r.WriteObject(context.Background(), "zend.txt", "hello", t1), + r.WriteObject(context.Background(), "sub dir/hello world", "hello world", t1), + r.WriteObject(context.Background(), "sub dir/hello world2", "hello world", t1), + r.WriteObject(context.Background(), "sub dir/ignore dir/.ignore", "", t1), + r.WriteObject(context.Background(), "sub dir/ignore dir/should be ignored", "to ignore", t1), + r.WriteObject(context.Background(), "sub 
dir/sub sub dir/hello world3", "hello world", t1), } fstest.CheckItems(t, r.Fremote, files...) var items fs.DirEntries @@ -50,20 +51,20 @@ func TestListDirSorted(t *testing.T) { return name } - items, err = list.DirSorted(r.Fremote, true, "") + items, err = list.DirSorted(context.Background(), r.Fremote, true, "") require.NoError(t, err) require.Len(t, items, 3) assert.Equal(t, "a.txt", str(0)) assert.Equal(t, "sub dir/", str(1)) assert.Equal(t, "zend.txt", str(2)) - items, err = list.DirSorted(r.Fremote, false, "") + items, err = list.DirSorted(context.Background(), r.Fremote, false, "") require.NoError(t, err) require.Len(t, items, 2) assert.Equal(t, "sub dir/", str(0)) assert.Equal(t, "zend.txt", str(1)) - items, err = list.DirSorted(r.Fremote, true, "sub dir") + items, err = list.DirSorted(context.Background(), r.Fremote, true, "sub dir") require.NoError(t, err) require.Len(t, items, 4) assert.Equal(t, "sub dir/hello world", str(0)) @@ -71,7 +72,7 @@ func TestListDirSorted(t *testing.T) { assert.Equal(t, "sub dir/ignore dir/", str(2)) assert.Equal(t, "sub dir/sub sub dir/", str(3)) - items, err = list.DirSorted(r.Fremote, false, "sub dir") + items, err = list.DirSorted(context.Background(), r.Fremote, false, "sub dir") require.NoError(t, err) require.Len(t, items, 2) assert.Equal(t, "sub dir/ignore dir/", str(0)) @@ -80,23 +81,23 @@ func TestListDirSorted(t *testing.T) { // testing ignore file filter.Active.Opt.ExcludeFile = ".ignore" - items, err = list.DirSorted(r.Fremote, false, "sub dir") + items, err = list.DirSorted(context.Background(), r.Fremote, false, "sub dir") require.NoError(t, err) require.Len(t, items, 1) assert.Equal(t, "sub dir/sub sub dir/", str(0)) - items, err = list.DirSorted(r.Fremote, false, "sub dir/ignore dir") + items, err = list.DirSorted(context.Background(), r.Fremote, false, "sub dir/ignore dir") require.NoError(t, err) require.Len(t, items, 0) - items, err = list.DirSorted(r.Fremote, true, "sub dir/ignore dir") + items, err = 
list.DirSorted(context.Background(), r.Fremote, true, "sub dir/ignore dir") require.NoError(t, err) require.Len(t, items, 2) assert.Equal(t, "sub dir/ignore dir/.ignore", str(0)) assert.Equal(t, "sub dir/ignore dir/should be ignored", str(1)) filter.Active.Opt.ExcludeFile = "" - items, err = list.DirSorted(r.Fremote, false, "sub dir/ignore dir") + items, err = list.DirSorted(context.Background(), r.Fremote, false, "sub dir/ignore dir") require.NoError(t, err) require.Len(t, items, 2) assert.Equal(t, "sub dir/ignore dir/.ignore", str(0)) diff --git a/fs/operations/lsjson.go b/fs/operations/lsjson.go index 298c96c8b..faaa61e21 100644 --- a/fs/operations/lsjson.go +++ b/fs/operations/lsjson.go @@ -1,6 +1,7 @@ package operations import ( + "context" "path" "time" @@ -78,7 +79,7 @@ type ListJSONOpt struct { } // ListJSON lists fsrc using the options in opt calling callback for each item -func ListJSON(fsrc fs.Fs, remote string, opt *ListJSONOpt, callback func(*ListJSONItem) error) error { +func ListJSON(ctx context.Context, fsrc fs.Fs, remote string, opt *ListJSONOpt, callback func(*ListJSONItem) error) error { var cipher crypt.Cipher if opt.ShowEncrypted { fsInfo, _, _, config, err := fs.ConfigFs(fsrc.Name() + ":" + fsrc.Root()) @@ -97,7 +98,7 @@ func ListJSON(fsrc fs.Fs, remote string, opt *ListJSONOpt, callback func(*ListJS canGetTier := features.GetTier format := formatForPrecision(fsrc.Precision()) isBucket := features.BucketBased && remote == "" && fsrc.Root() == "" // if bucket based remote listing the root mark directories as buckets - err := walk.ListR(fsrc, remote, false, ConfigMaxDepth(opt.Recurse), walk.ListAll, func(entries fs.DirEntries) (err error) { + err := walk.ListR(ctx, fsrc, remote, false, ConfigMaxDepth(opt.Recurse), walk.ListAll, func(entries fs.DirEntries) (err error) { for _, entry := range entries { switch entry.(type) { case fs.Directory: @@ -116,10 +117,10 @@ func ListJSON(fsrc fs.Fs, remote string, opt *ListJSONOpt, callback func(*ListJS 
Path: entry.Remote(), Name: path.Base(entry.Remote()), Size: entry.Size(), - MimeType: fs.MimeTypeDirEntry(entry), + MimeType: fs.MimeTypeDirEntry(ctx, entry), } if !opt.NoModTime { - item.ModTime = Timestamp{When: entry.ModTime(), Format: format} + item.ModTime = Timestamp{When: entry.ModTime(ctx), Format: format} } if cipher != nil { switch entry.(type) { @@ -161,7 +162,7 @@ func ListJSON(fsrc fs.Fs, remote string, opt *ListJSONOpt, callback func(*ListJS if opt.ShowHash { item.Hashes = make(map[string]string) for _, hashType := range x.Fs().Hashes().Array() { - hash, err := x.Hash(hashType) + hash, err := x.Hash(ctx, hashType) if err != nil { fs.Errorf(x, "Failed to read hash: %v", err) } else if hash != "" { diff --git a/fs/operations/multithread.go b/fs/operations/multithread.go index bbb913287..211b32102 100644 --- a/fs/operations/multithread.go +++ b/fs/operations/multithread.go @@ -28,7 +28,7 @@ type multiThreadCopyState struct { } // Copy a single stream into place -func (mc *multiThreadCopyState) copyStream(stream int) (err error) { +func (mc *multiThreadCopyState) copyStream(ctx context.Context, stream int) (err error) { defer func() { if err != nil { fs.Debugf(mc.src, "multi-thread copy: stream %d/%d failed: %v", stream+1, mc.streams, err) @@ -45,7 +45,7 @@ func (mc *multiThreadCopyState) copyStream(stream int) (err error) { fs.Debugf(mc.src, "multi-thread copy: stream %d/%d (%d-%d) size %v starting", stream+1, mc.streams, start, end, fs.SizeSuffix(end-start)) - rc, err := newReOpen(mc.src, nil, &fs.RangeOption{Start: start, End: end - 1}, fs.Config.LowLevelRetries) + rc, err := newReOpen(ctx, mc.src, nil, &fs.RangeOption{Start: start, End: end - 1}, fs.Config.LowLevelRetries) if err != nil { return errors.Wrap(err, "multpart copy: failed to open source") } @@ -110,7 +110,7 @@ func (mc *multiThreadCopyState) calculateChunks() { } // Copy src to (f, remote) using streams download threads and the OpenWriterAt feature -func multiThreadCopy(f fs.Fs, remote 
string, src fs.Object, streams int) (newDst fs.Object, err error) { +func multiThreadCopy(ctx context.Context, f fs.Fs, remote string, src fs.Object, streams int) (newDst fs.Object, err error) { openWriterAt := f.Features().OpenWriterAt if openWriterAt == nil { return nil, errors.New("multi-thread copy: OpenWriterAt not supported") @@ -136,7 +136,7 @@ func multiThreadCopy(f fs.Fs, remote string, src fs.Object, streams int) (newDst defer fs.CheckClose(mc.acc, &err) // create write file handle - mc.wc, err = openWriterAt(remote, mc.size) + mc.wc, err = openWriterAt(ctx, remote, mc.size) if err != nil { return nil, errors.Wrap(err, "multpart copy: failed to open destination") } @@ -146,7 +146,7 @@ func multiThreadCopy(f fs.Fs, remote string, src fs.Object, streams int) (newDst for stream := 0; stream < mc.streams; stream++ { stream := stream g.Go(func() (err error) { - return mc.copyStream(stream) + return mc.copyStream(ctx, stream) }) } err = g.Wait() @@ -154,12 +154,12 @@ func multiThreadCopy(f fs.Fs, remote string, src fs.Object, streams int) (newDst return nil, err } - obj, err := f.NewObject(remote) + obj, err := f.NewObject(ctx, remote) if err != nil { return nil, errors.Wrap(err, "multi-thread copy: failed to find object after copy") } - err = obj.SetModTime(src.ModTime()) + err = obj.SetModTime(ctx, src.ModTime(ctx)) switch err { case nil, fs.ErrorCantSetModTime, fs.ErrorCantSetModTimeWithoutDelete: default: diff --git a/fs/operations/multithread_test.go b/fs/operations/multithread_test.go index 4ebc54484..cd1883fb9 100644 --- a/fs/operations/multithread_test.go +++ b/fs/operations/multithread_test.go @@ -1,6 +1,7 @@ package operations import ( + "context" "fmt" "testing" @@ -50,20 +51,20 @@ func TestMultithreadCopy(t *testing.T) { t.Run(fmt.Sprintf("%+v", test), func(t *testing.T) { contents := fstest.RandomString(test.size) t1 := fstest.Time("2001-02-03T04:05:06.499999999Z") - file1 := r.WriteObject("file1", contents, t1) + file1 := 
r.WriteObject(context.Background(), "file1", contents, t1) fstest.CheckItems(t, r.Fremote, file1) fstest.CheckItems(t, r.Flocal) - src, err := r.Fremote.NewObject("file1") + src, err := r.Fremote.NewObject(context.Background(), "file1") require.NoError(t, err) - dst, err := multiThreadCopy(r.Flocal, "file1", src, 2) + dst, err := multiThreadCopy(context.Background(), r.Flocal, "file1", src, 2) require.NoError(t, err) assert.Equal(t, src.Size(), dst.Size()) assert.Equal(t, "file1", dst.Remote()) fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{file1}, nil, fs.ModTimeNotSupported) - require.NoError(t, dst.Remove()) + require.NoError(t, dst.Remove(context.Background())) }) } diff --git a/fs/operations/operations.go b/fs/operations/operations.go index d1e04da5c..10c6e107d 100644 --- a/fs/operations/operations.go +++ b/fs/operations/operations.go @@ -45,14 +45,14 @@ import ( // err - may return an error which will already have been logged // // If an error is returned it will return equal as false -func CheckHashes(src fs.ObjectInfo, dst fs.Object) (equal bool, ht hash.Type, err error) { +func CheckHashes(ctx context.Context, src fs.ObjectInfo, dst fs.Object) (equal bool, ht hash.Type, err error) { common := src.Fs().Hashes().Overlap(dst.Fs().Hashes()) // fs.Debugf(nil, "Shared hashes: %v", common) if common.Count() == 0 { return true, hash.None, nil } ht = common.GetOne() - srcHash, err := src.Hash(ht) + srcHash, err := src.Hash(ctx, ht) if err != nil { fs.CountError(err) fs.Errorf(src, "Failed to calculate src hash: %v", err) @@ -61,7 +61,7 @@ func CheckHashes(src fs.ObjectInfo, dst fs.Object) (equal bool, ht hash.Type, er if srcHash == "" { return true, hash.None, nil } - dstHash, err := dst.Hash(ht) + dstHash, err := dst.Hash(ctx, ht) if err != nil { fs.CountError(err) fs.Errorf(dst, "Failed to calculate dst hash: %v", err) @@ -95,8 +95,8 @@ func CheckHashes(src fs.ObjectInfo, dst fs.Object) (equal bool, ht hash.Type, er // // Otherwise the file is 
considered to be not equal including if there // were errors reading info. -func Equal(src fs.ObjectInfo, dst fs.Object) bool { - return equal(src, dst, fs.Config.SizeOnly, fs.Config.CheckSum) +func Equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object) bool { + return equal(ctx, src, dst, fs.Config.SizeOnly, fs.Config.CheckSum) } // sizeDiffers compare the size of src and dst taking into account the @@ -110,7 +110,7 @@ func sizeDiffers(src, dst fs.ObjectInfo) bool { var checksumWarning sync.Once -func equal(src fs.ObjectInfo, dst fs.Object, sizeOnly, checkSum bool) bool { +func equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object, sizeOnly, checkSum bool) bool { if sizeDiffers(src, dst) { fs.Debugf(src, "Sizes differ (src %d vs dst %d)", src.Size(), dst.Size()) return false @@ -125,7 +125,7 @@ func equal(src fs.ObjectInfo, dst fs.Object, sizeOnly, checkSum bool) bool { // If checking checksum and not modtime if checkSum { // Check the hash - same, ht, _ := CheckHashes(src, dst) + same, ht, _ := CheckHashes(ctx, src, dst) if !same { fs.Debugf(src, "%v differ", ht) return false @@ -147,8 +147,8 @@ func equal(src fs.ObjectInfo, dst fs.Object, sizeOnly, checkSum bool) bool { fs.Debugf(src, "Sizes identical") return true } - srcModTime := src.ModTime() - dstModTime := dst.ModTime() + srcModTime := src.ModTime(ctx) + dstModTime := dst.ModTime(ctx) dt := dstModTime.Sub(srcModTime) if dt < modifyWindow && dt > -modifyWindow { fs.Debugf(src, "Size and modification time the same (differ by %s, within tolerance %s)", dt, modifyWindow) @@ -158,7 +158,7 @@ func equal(src fs.ObjectInfo, dst fs.Object, sizeOnly, checkSum bool) bool { fs.Debugf(src, "Modification times differ by %s: %v, %v", dt, srcModTime, dstModTime) // Check if the hashes are the same - same, ht, _ := CheckHashes(src, dst) + same, ht, _ := CheckHashes(ctx, src, dst) if !same { fs.Debugf(src, "%v differ", ht) return false @@ -180,7 +180,7 @@ func equal(src fs.ObjectInfo, dst fs.Object, sizeOnly, 
checkSum bool) bool { return false } // Update the mtime of the dst object here - err := dst.SetModTime(srcModTime) + err := dst.SetModTime(ctx, srcModTime) if err == fs.ErrorCantSetModTime { fs.Debugf(dst, "src and dst identical but can't set mod time without re-uploading") return false @@ -189,7 +189,7 @@ func equal(src fs.ObjectInfo, dst fs.Object, sizeOnly, checkSum bool) bool { // Remove the file if BackupDir isn't set. If BackupDir is set we would rather have the old file // put in the BackupDir than deleted which is what will happen if we don't delete it. if fs.Config.BackupDir == "" { - err = dst.Remove() + err = dst.Remove(ctx) if err != nil { fs.Errorf(dst, "failed to delete before re-upload: %v", err) } @@ -209,12 +209,12 @@ func equal(src fs.ObjectInfo, dst fs.Object, sizeOnly, checkSum bool) bool { // Used to remove a failed copy // // Returns whether the file was successfully removed or not -func removeFailedCopy(dst fs.Object) bool { +func removeFailedCopy(ctx context.Context, dst fs.Object) bool { if dst == nil { return false } fs.Infof(dst, "Removing failed copy") - removeErr := dst.Remove() + removeErr := dst.Remove(ctx) if removeErr != nil { fs.Infof(dst, "Failed to remove failed copy: %s", removeErr) return false @@ -235,9 +235,9 @@ func (o *overrideRemoteObject) Remote() string { // MimeType returns the mime type of the underlying object or "" if it // can't be worked out -func (o *overrideRemoteObject) MimeType() string { +func (o *overrideRemoteObject) MimeType(ctx context.Context) string { if do, ok := o.Object.(fs.MimeTyper); ok { - return do.MimeType() + return do.MimeType(ctx) } return "" } @@ -250,7 +250,7 @@ var _ fs.MimeTyper = (*overrideRemoteObject)(nil) // // It returns the destination object if possible. Note that this may // be nil. 
-func Copy(f fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Object, err error) { +func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Object, err error) { accounting.Stats.Transferring(src.Remote()) defer func() { accounting.Stats.DoneTransferring(src.Remote(), err == nil) @@ -284,7 +284,7 @@ func Copy(f fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Objec if fs.Config.MaxTransfer >= 0 && accounting.Stats.GetBytes() >= int64(fs.Config.MaxTransfer) { return nil, accounting.ErrorMaxTransferLimitReached } - newDst, err = doCopy(src, remote) + newDst, err = doCopy(ctx, src, remote) if err == nil { dst = newDst accounting.Stats.Bytes(dst.Size()) // account the bytes for the server side transfer @@ -304,7 +304,7 @@ func Copy(f fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Objec if streams < 2 { streams = 2 } - dst, err = multiThreadCopy(f, remote, src, int(streams)) + dst, err = multiThreadCopy(ctx, f, remote, src, int(streams)) if doUpdate { actionTaken = "Multi-thread Copied (replaced existing)" } else { @@ -312,7 +312,7 @@ func Copy(f fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Objec } } else { var in0 io.ReadCloser - in0, err = newReOpen(src, hashOption, nil, fs.Config.LowLevelRetries) + in0, err = newReOpen(ctx, src, hashOption, nil, fs.Config.LowLevelRetries) if err != nil { err = errors.Wrap(err, "failed to open source object") } else { @@ -323,7 +323,7 @@ func Copy(f fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Objec } else { actionTaken = "Copied (Rcat, new)" } - dst, err = Rcat(f, remote, in0, src.ModTime()) + dst, err = Rcat(ctx, f, remote, in0, src.ModTime(ctx)) newDst = dst } else { in := accounting.NewAccount(in0, src).WithBuffer() // account and buffer the transfer @@ -334,10 +334,10 @@ func Copy(f fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Objec } if doUpdate { actionTaken = "Copied (replaced existing)" - 
err = dst.Update(in, wrappedSrc, hashOption) + err = dst.Update(ctx, in, wrappedSrc, hashOption) } else { actionTaken = "Copied (new)" - dst, err = f.Put(in, wrappedSrc, hashOption) + dst, err = f.Put(ctx, in, wrappedSrc, hashOption) } closeErr := in.Close() if err == nil { @@ -371,20 +371,20 @@ func Copy(f fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Objec err = errors.Errorf("corrupted on transfer: sizes differ %d vs %d", src.Size(), dst.Size()) fs.Errorf(dst, "%v", err) fs.CountError(err) - removeFailedCopy(dst) + removeFailedCopy(ctx, dst) return newDst, err } // Verify hashes are the same after transfer - ignoring blank hashes if !fs.Config.IgnoreChecksum && hashType != hash.None { var srcSum string - srcSum, err = src.Hash(hashType) + srcSum, err = src.Hash(ctx, hashType) if err != nil { fs.CountError(err) fs.Errorf(src, "Failed to read src hash: %v", err) } else if srcSum != "" { var dstSum string - dstSum, err = dst.Hash(hashType) + dstSum, err = dst.Hash(ctx, hashType) if err != nil { fs.CountError(err) fs.Errorf(dst, "Failed to read hash: %v", err) @@ -392,7 +392,7 @@ func Copy(f fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Objec err = errors.Errorf("corrupted on transfer: %v hash differ %q vs %q", hashType, srcSum, dstSum) fs.Errorf(dst, "%v", err) fs.CountError(err) - removeFailedCopy(dst) + removeFailedCopy(ctx, dst) return newDst, err } } @@ -427,7 +427,7 @@ func SameObject(src, dst fs.Object) bool { // // It returns the destination object if possible. Note that this may // be nil. 
-func Move(fdst fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Object, err error) { +func Move(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Object, err error) { accounting.Stats.Checking(src.Remote()) defer func() { accounting.Stats.DoneChecking(src.Remote()) @@ -441,13 +441,13 @@ func Move(fdst fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Ob if doMove := fdst.Features().Move; doMove != nil && (SameConfig(src.Fs(), fdst) || (SameRemoteType(src.Fs(), fdst) && fdst.Features().ServerSideAcrossConfigs)) { // Delete destination if it exists and is not the same file as src (could be same file while seemingly different if the remote is case insensitive) if dst != nil && !SameObject(src, dst) { - err = DeleteFile(dst) + err = DeleteFile(ctx, dst) if err != nil { return newDst, err } } // Move dst <- src - newDst, err = doMove(src, remote) + newDst, err = doMove(ctx, src, remote) switch err { case nil: fs.Infof(src, "Moved (server side)") @@ -461,13 +461,13 @@ func Move(fdst fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Ob } } // Move not found or didn't work so copy dst <- src - newDst, err = Copy(fdst, dst, remote, src) + newDst, err = Copy(ctx, fdst, dst, remote, src) if err != nil { fs.Errorf(src, "Not deleting source as copy failed: %v", err) return newDst, err } // Delete src if no error on copy - return newDst, DeleteFile(src) + return newDst, DeleteFile(ctx, src) } // CanServerSideMove returns true if fdst support server side moves or @@ -500,7 +500,7 @@ func SuffixName(remote string) string { // // If backupDir is set then it moves the file to there instead of // deleting -func DeleteFileWithBackupDir(dst fs.Object, backupDir fs.Fs) (err error) { +func DeleteFileWithBackupDir(ctx context.Context, dst fs.Object, backupDir fs.Fs) (err error) { accounting.Stats.Checking(dst.Remote()) numDeletes := accounting.Stats.Deletes(1) if fs.Config.MaxDelete != -1 && numDeletes > 
fs.Config.MaxDelete { @@ -517,11 +517,11 @@ func DeleteFileWithBackupDir(dst fs.Object, backupDir fs.Fs) (err error) { err = errors.New("parameter to --backup-dir has to be on the same remote as destination") } else { remoteWithSuffix := SuffixName(dst.Remote()) - overwritten, _ := backupDir.NewObject(remoteWithSuffix) - _, err = Move(backupDir, overwritten, remoteWithSuffix, dst) + overwritten, _ := backupDir.NewObject(ctx, remoteWithSuffix) + _, err = Move(ctx, backupDir, overwritten, remoteWithSuffix, dst) } } else { - err = dst.Remove() + err = dst.Remove(ctx) } if err != nil { fs.CountError(err) @@ -537,8 +537,8 @@ func DeleteFileWithBackupDir(dst fs.Object, backupDir fs.Fs) (err error) { // // If useBackupDir is set and --backup-dir is in effect then it moves // the file to there instead of deleting -func DeleteFile(dst fs.Object) (err error) { - return DeleteFileWithBackupDir(dst, nil) +func DeleteFile(ctx context.Context, dst fs.Object) (err error) { + return DeleteFileWithBackupDir(ctx, dst, nil) } // DeleteFilesWithBackupDir removes all the files passed in the @@ -546,7 +546,7 @@ func DeleteFile(dst fs.Object) (err error) { // // If backupDir is set the files will be placed into that directory // instead of being deleted. 
-func DeleteFilesWithBackupDir(toBeDeleted fs.ObjectsChan, backupDir fs.Fs) error { +func DeleteFilesWithBackupDir(ctx context.Context, toBeDeleted fs.ObjectsChan, backupDir fs.Fs) error { var wg sync.WaitGroup wg.Add(fs.Config.Transfers) var errorCount int32 @@ -556,7 +556,7 @@ func DeleteFilesWithBackupDir(toBeDeleted fs.ObjectsChan, backupDir fs.Fs) error go func() { defer wg.Done() for dst := range toBeDeleted { - err := DeleteFileWithBackupDir(dst, backupDir) + err := DeleteFileWithBackupDir(ctx, dst, backupDir) if err != nil { atomic.AddInt32(&errorCount, 1) if fserrors.IsFatalError(err) { @@ -581,8 +581,8 @@ func DeleteFilesWithBackupDir(toBeDeleted fs.ObjectsChan, backupDir fs.Fs) error } // DeleteFiles removes all the files passed in the channel -func DeleteFiles(toBeDeleted fs.ObjectsChan) error { - return DeleteFilesWithBackupDir(toBeDeleted, nil) +func DeleteFiles(ctx context.Context, toBeDeleted fs.ObjectsChan) error { + return DeleteFilesWithBackupDir(ctx, toBeDeleted, nil) } // SameRemoteType returns true if fdst and fsrc are the same type @@ -624,8 +624,8 @@ func Overlapping(fdst, fsrc fs.Info) bool { // // it returns true if differences were found // it also returns whether it couldn't be hashed -func checkIdentical(dst, src fs.Object) (differ bool, noHash bool) { - same, ht, err := CheckHashes(src, dst) +func checkIdentical(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool) { + same, ht, err := CheckHashes(ctx, src, dst) if err != nil { // CheckHashes will log and count errors return true, false @@ -643,7 +643,7 @@ func checkIdentical(dst, src fs.Object) (differ bool, noHash bool) { } // checkFn is the the type of the checking function used in CheckFn() -type checkFn func(a, b fs.Object) (differ bool, noHash bool) +type checkFn func(ctx context.Context, a, b fs.Object) (differ bool, noHash bool) // checkMarch is used to march over two Fses in the same way as // sync/copy @@ -698,7 +698,7 @@ func (c *checkMarch) SrcOnly(src 
fs.DirEntry) (recurse bool) { } // check to see if two objects are identical using the check function -func (c *checkMarch) checkIdentical(dst, src fs.Object) (differ bool, noHash bool) { +func (c *checkMarch) checkIdentical(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool) { accounting.Stats.Checking(src.Remote()) defer accounting.Stats.DoneChecking(src.Remote()) if sizeDiffers(src, dst) { @@ -710,16 +710,16 @@ func (c *checkMarch) checkIdentical(dst, src fs.Object) (differ bool, noHash boo if fs.Config.SizeOnly { return false, false } - return c.check(dst, src) + return c.check(ctx, dst, src) } // Match is called when src and dst are present, so sync src to dst -func (c *checkMarch) Match(dst, src fs.DirEntry) (recurse bool) { +func (c *checkMarch) Match(ctx context.Context, dst, src fs.DirEntry) (recurse bool) { switch srcX := src.(type) { case fs.Object: dstX, ok := dst.(fs.Object) if ok { - differ, noHash := c.checkIdentical(dstX, srcX) + differ, noHash := c.checkIdentical(ctx, dstX, srcX) if differ { atomic.AddInt32(&c.differences, 1) } else { @@ -761,7 +761,7 @@ func (c *checkMarch) Match(dst, src fs.DirEntry) (recurse bool) { // // it returns true if differences were found // it also returns whether it couldn't be hashed -func CheckFn(fdst, fsrc fs.Fs, check checkFn, oneway bool) error { +func CheckFn(ctx context.Context, fdst, fsrc fs.Fs, check checkFn, oneway bool) error { c := &checkMarch{ fdst: fdst, fsrc: fsrc, @@ -771,7 +771,7 @@ func CheckFn(fdst, fsrc fs.Fs, check checkFn, oneway bool) error { // set up a march over fdst and fsrc m := &march.March{ - Ctx: context.Background(), + Ctx: ctx, Fdst: fdst, Fsrc: fsrc, Dir: "", @@ -801,8 +801,8 @@ func CheckFn(fdst, fsrc fs.Fs, check checkFn, oneway bool) error { } // Check the files in fsrc and fdst according to Size and hash -func Check(fdst, fsrc fs.Fs, oneway bool) error { - return CheckFn(fdst, fsrc, checkIdentical, oneway) +func Check(ctx context.Context, fdst, fsrc fs.Fs, oneway 
bool) error { + return CheckFn(ctx, fdst, fsrc, checkIdentical, oneway) } // CheckEqualReaders checks to see if in1 and in2 have the same @@ -839,15 +839,15 @@ func CheckEqualReaders(in1, in2 io.Reader) (differ bool, err error) { // reading all their bytes if necessary. // // it returns true if differences were found -func CheckIdentical(dst, src fs.Object) (differ bool, err error) { - in1, err := dst.Open() +func CheckIdentical(ctx context.Context, dst, src fs.Object) (differ bool, err error) { + in1, err := dst.Open(ctx) if err != nil { return true, errors.Wrapf(err, "failed to open %q", dst) } in1 = accounting.NewAccount(in1, dst).WithBuffer() // account and buffer the transfer defer fs.CheckClose(in1, &err) - in2, err := src.Open() + in2, err := src.Open(ctx) if err != nil { return true, errors.Wrapf(err, "failed to open %q", src) } @@ -859,9 +859,9 @@ func CheckIdentical(dst, src fs.Object) (differ bool, err error) { // CheckDownload checks the files in fsrc and fdst according to Size // and the actual contents of the files. 
-func CheckDownload(fdst, fsrc fs.Fs, oneway bool) error { - check := func(a, b fs.Object) (differ bool, noHash bool) { - differ, err := CheckIdentical(a, b) +func CheckDownload(ctx context.Context, fdst, fsrc fs.Fs, oneway bool) error { + check := func(ctx context.Context, a, b fs.Object) (differ bool, noHash bool) { + differ, err := CheckIdentical(ctx, a, b) if err != nil { fs.CountError(err) fs.Errorf(a, "Failed to download: %v", err) @@ -869,14 +869,14 @@ func CheckDownload(fdst, fsrc fs.Fs, oneway bool) error { } return differ, false } - return CheckFn(fdst, fsrc, check, oneway) + return CheckFn(ctx, fdst, fsrc, check, oneway) } // ListFn lists the Fs to the supplied function // // Lists in parallel which may get them out of order -func ListFn(f fs.Fs, fn func(fs.Object)) error { - return walk.ListR(f, "", false, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error { +func ListFn(ctx context.Context, f fs.Fs, fn func(fs.Object)) error { + return walk.ListR(ctx, f, "", false, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error { entries.ForObject(fn) return nil }) @@ -899,8 +899,8 @@ func syncFprintf(w io.Writer, format string, a ...interface{}) { // Shows size and path - obeys includes and excludes // // Lists in parallel which may get them out of order -func List(f fs.Fs, w io.Writer) error { - return ListFn(f, func(o fs.Object) { +func List(ctx context.Context, f fs.Fs, w io.Writer) error { + return ListFn(ctx, f, func(o fs.Object) { syncFprintf(w, "%9d %s\n", o.Size(), o.Remote()) }) } @@ -910,10 +910,10 @@ func List(f fs.Fs, w io.Writer) error { // Shows size, mod time and path - obeys includes and excludes // // Lists in parallel which may get them out of order -func ListLong(f fs.Fs, w io.Writer) error { - return ListFn(f, func(o fs.Object) { +func ListLong(ctx context.Context, f fs.Fs, w io.Writer) error { + return ListFn(ctx, f, func(o fs.Object) { accounting.Stats.Checking(o.Remote()) - modTime := o.ModTime() + 
modTime := o.ModTime(ctx) accounting.Stats.DoneChecking(o.Remote()) syncFprintf(w, "%9d %s %s\n", o.Size(), modTime.Local().Format("2006-01-02 15:04:05.000000000"), o.Remote()) }) @@ -925,8 +925,8 @@ func ListLong(f fs.Fs, w io.Writer) error { // excludes // // Lists in parallel which may get them out of order -func Md5sum(f fs.Fs, w io.Writer) error { - return HashLister(hash.MD5, f, w) +func Md5sum(ctx context.Context, f fs.Fs, w io.Writer) error { + return HashLister(ctx, hash.MD5, f, w) } // Sha1sum list the Fs to the supplied writer @@ -934,8 +934,8 @@ func Md5sum(f fs.Fs, w io.Writer) error { // Obeys includes and excludes // // Lists in parallel which may get them out of order -func Sha1sum(f fs.Fs, w io.Writer) error { - return HashLister(hash.SHA1, f, w) +func Sha1sum(ctx context.Context, f fs.Fs, w io.Writer) error { + return HashLister(ctx, hash.SHA1, f, w) } // DropboxHashSum list the Fs to the supplied writer @@ -943,15 +943,15 @@ func Sha1sum(f fs.Fs, w io.Writer) error { // Obeys includes and excludes // // Lists in parallel which may get them out of order -func DropboxHashSum(f fs.Fs, w io.Writer) error { - return HashLister(hash.Dropbox, f, w) +func DropboxHashSum(ctx context.Context, f fs.Fs, w io.Writer) error { + return HashLister(ctx, hash.Dropbox, f, w) } // hashSum returns the human readable hash for ht passed in. This may // be UNSUPPORTED or ERROR. 
-func hashSum(ht hash.Type, o fs.Object) string { +func hashSum(ctx context.Context, ht hash.Type, o fs.Object) string { accounting.Stats.Checking(o.Remote()) - sum, err := o.Hash(ht) + sum, err := o.Hash(ctx, ht) accounting.Stats.DoneChecking(o.Remote()) if err == hash.ErrUnsupported { sum = "UNSUPPORTED" @@ -963,9 +963,9 @@ func hashSum(ht hash.Type, o fs.Object) string { } // HashLister does a md5sum equivalent for the hash type passed in -func HashLister(ht hash.Type, f fs.Fs, w io.Writer) error { - return ListFn(f, func(o fs.Object) { - sum := hashSum(ht, o) +func HashLister(ctx context.Context, ht hash.Type, f fs.Fs, w io.Writer) error { + return ListFn(ctx, f, func(o fs.Object) { + sum := hashSum(ctx, ht, o) syncFprintf(w, "%*s %s\n", hash.Width[ht], sum, o.Remote()) }) } @@ -973,8 +973,8 @@ func HashLister(ht hash.Type, f fs.Fs, w io.Writer) error { // Count counts the objects and their sizes in the Fs // // Obeys includes and excludes -func Count(f fs.Fs) (objects int64, size int64, err error) { - err = ListFn(f, func(o fs.Object) { +func Count(ctx context.Context, f fs.Fs) (objects int64, size int64, err error) { + err = ListFn(ctx, f, func(o fs.Object) { atomic.AddInt64(&objects, 1) objectSize := o.Size() if objectSize > 0 { @@ -994,11 +994,11 @@ func ConfigMaxDepth(recursive bool) int { } // ListDir lists the directories/buckets/containers in the Fs to the supplied writer -func ListDir(f fs.Fs, w io.Writer) error { - return walk.ListR(f, "", false, ConfigMaxDepth(false), walk.ListDirs, func(entries fs.DirEntries) error { +func ListDir(ctx context.Context, f fs.Fs, w io.Writer) error { + return walk.ListR(ctx, f, "", false, ConfigMaxDepth(false), walk.ListDirs, func(entries fs.DirEntries) error { entries.ForDir(func(dir fs.Directory) { if dir != nil { - syncFprintf(w, "%12d %13s %9d %s\n", dir.Size(), dir.ModTime().Local().Format("2006-01-02 15:04:05"), dir.Items(), dir.Remote()) + syncFprintf(w, "%12d %13s %9d %s\n", dir.Size(), 
dir.ModTime(ctx).Local().Format("2006-01-02 15:04:05"), dir.Items(), dir.Remote()) } }) return nil @@ -1006,13 +1006,13 @@ func ListDir(f fs.Fs, w io.Writer) error { } // Mkdir makes a destination directory or container -func Mkdir(f fs.Fs, dir string) error { +func Mkdir(ctx context.Context, f fs.Fs, dir string) error { if fs.Config.DryRun { fs.Logf(fs.LogDirName(f, dir), "Not making directory as dry run is set") return nil } fs.Debugf(fs.LogDirName(f, dir), "Making directory") - err := f.Mkdir(dir) + err := f.Mkdir(ctx, dir) if err != nil { fs.CountError(err) return err @@ -1022,18 +1022,18 @@ func Mkdir(f fs.Fs, dir string) error { // TryRmdir removes a container but not if not empty. It doesn't // count errors but may return one. -func TryRmdir(f fs.Fs, dir string) error { +func TryRmdir(ctx context.Context, f fs.Fs, dir string) error { if fs.Config.DryRun { fs.Logf(fs.LogDirName(f, dir), "Not deleting as dry run is set") return nil } fs.Debugf(fs.LogDirName(f, dir), "Removing directory") - return f.Rmdir(dir) + return f.Rmdir(ctx, dir) } // Rmdir removes a container but not if not empty -func Rmdir(f fs.Fs, dir string) error { - err := TryRmdir(f, dir) +func Rmdir(ctx context.Context, f fs.Fs, dir string) error { + err := TryRmdir(ctx, f, dir) if err != nil { fs.CountError(err) return err @@ -1042,7 +1042,7 @@ func Rmdir(f fs.Fs, dir string) error { } // Purge removes a directory and all of its contents -func Purge(f fs.Fs, dir string) error { +func Purge(ctx context.Context, f fs.Fs, dir string) error { doFallbackPurge := true var err error if dir == "" { @@ -1052,7 +1052,7 @@ func Purge(f fs.Fs, dir string) error { if fs.Config.DryRun { fs.Logf(f, "Not purging as --dry-run set") } else { - err = doPurge() + err = doPurge(ctx) if err == fs.ErrorCantPurge { doFallbackPurge = true } @@ -1061,11 +1061,11 @@ func Purge(f fs.Fs, dir string) error { } if doFallbackPurge { // DeleteFiles and Rmdir observe --dry-run - err = DeleteFiles(listToChan(f, dir)) + err = 
DeleteFiles(ctx, listToChan(ctx, f, dir)) if err != nil { return err } - err = Rmdirs(f, dir, false) + err = Rmdirs(ctx, f, dir, false) } if err != nil { fs.CountError(err) @@ -1076,13 +1076,13 @@ func Purge(f fs.Fs, dir string) error { // Delete removes all the contents of a container. Unlike Purge, it // obeys includes and excludes. -func Delete(f fs.Fs) error { +func Delete(ctx context.Context, f fs.Fs) error { delChan := make(fs.ObjectsChan, fs.Config.Transfers) delErr := make(chan error, 1) go func() { - delErr <- DeleteFiles(delChan) + delErr <- DeleteFiles(ctx, delChan) }() - err := ListFn(f, func(o fs.Object) { + err := ListFn(ctx, f, func(o fs.Object) { delChan <- o }) close(delChan) @@ -1099,11 +1099,11 @@ func Delete(f fs.Fs) error { // channel. // // If the error was ErrorDirNotFound then it will be ignored -func listToChan(f fs.Fs, dir string) fs.ObjectsChan { +func listToChan(ctx context.Context, f fs.Fs, dir string) fs.ObjectsChan { o := make(fs.ObjectsChan, fs.Config.Checkers) go func() { defer close(o) - err := walk.ListR(f, dir, true, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error { + err := walk.ListR(ctx, f, dir, true, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error { entries.ForObject(func(obj fs.Object) { o <- obj }) @@ -1119,7 +1119,7 @@ func listToChan(f fs.Fs, dir string) fs.ObjectsChan { } // CleanUp removes the trash for the Fs -func CleanUp(f fs.Fs) error { +func CleanUp(ctx context.Context, f fs.Fs) error { doCleanUp := f.Features().CleanUp if doCleanUp == nil { return errors.Errorf("%v doesn't support cleanup", f) @@ -1128,7 +1128,7 @@ func CleanUp(f fs.Fs) error { fs.Logf(f, "Not running cleanup as --dry-run set") return nil } - return doCleanUp() + return doCleanUp(ctx) } // wrap a Reader and a Closer together into a ReadCloser @@ -1145,9 +1145,9 @@ type readCloser struct { // // if count < 0 then it will be ignored // if count >= 0 then only that many characters will be output -func 
Cat(f fs.Fs, w io.Writer, offset, count int64) error { +func Cat(ctx context.Context, f fs.Fs, w io.Writer, offset, count int64) error { var mu sync.Mutex - return ListFn(f, func(o fs.Object) { + return ListFn(ctx, f, func(o fs.Object) { var err error accounting.Stats.Transferring(o.Remote()) defer func() { @@ -1165,7 +1165,7 @@ func Cat(f fs.Fs, w io.Writer, offset, count int64) error { if opt.Start > 0 || opt.End >= 0 { options = append(options, &opt) } - in, err := o.Open(options...) + in, err := o.Open(ctx, options...) if err != nil { fs.CountError(err) fs.Errorf(o, "Failed to open: %v", err) @@ -1198,7 +1198,7 @@ func Cat(f fs.Fs, w io.Writer, offset, count int64) error { } // Rcat reads data from the Reader until EOF and uploads it to a file on remote -func Rcat(fdst fs.Fs, dstFileName string, in io.ReadCloser, modTime time.Time) (dst fs.Object, err error) { +func Rcat(ctx context.Context, fdst fs.Fs, dstFileName string, in io.ReadCloser, modTime time.Time) (dst fs.Object, err error) { accounting.Stats.Transferring(dstFileName) in = accounting.NewAccountSizeName(in, -1, dstFileName).WithBuffer() defer func() { @@ -1218,7 +1218,7 @@ func Rcat(fdst fs.Fs, dstFileName string, in io.ReadCloser, modTime time.Time) ( compare := func(dst fs.Object) error { src := object.NewStaticObjectInfo(dstFileName, modTime, int64(readCounter.BytesRead()), false, hash.Sums(), fdst) - if !Equal(src, dst) { + if !Equal(ctx, src, dst) { err = errors.Errorf("corrupted on transfer") fs.CountError(err) fs.Errorf(dst, "%v", err) @@ -1232,7 +1232,7 @@ func Rcat(fdst fs.Fs, dstFileName string, in io.ReadCloser, modTime time.Time) ( if n, err := io.ReadFull(trackingIn, buf); err == io.EOF || err == io.ErrUnexpectedEOF { fs.Debugf(fdst, "File to upload is small (%d bytes), uploading instead of streaming", n) src := object.NewMemoryObject(dstFileName, modTime, buf[:n]) - return Copy(fdst, nil, dstFileName, src) + return Copy(ctx, fdst, nil, dstFileName, src) } // Make a new ReadCloser with 
the bits we've already read @@ -1250,7 +1250,7 @@ func Rcat(fdst fs.Fs, dstFileName string, in io.ReadCloser, modTime time.Time) ( return nil, errors.Wrap(err, "Failed to create temporary local FS to spool file") } defer func() { - err := Purge(tmpLocalFs, "") + err := Purge(ctx, tmpLocalFs, "") if err != nil { fs.Infof(tmpLocalFs, "Failed to cleanup temporary FS: %v", err) } @@ -1266,7 +1266,7 @@ func Rcat(fdst fs.Fs, dstFileName string, in io.ReadCloser, modTime time.Time) ( } objInfo := object.NewStaticObjectInfo(dstFileName, modTime, -1, false, nil, nil) - if dst, err = fStreamTo.Features().PutStream(in, objInfo, hashOption); err != nil { + if dst, err = fStreamTo.Features().PutStream(ctx, in, objInfo, hashOption); err != nil { return dst, err } if err = compare(dst); err != nil { @@ -1274,26 +1274,26 @@ func Rcat(fdst fs.Fs, dstFileName string, in io.ReadCloser, modTime time.Time) ( } if !canStream { // copy dst (which is the local object we have just streamed to) to the remote - return Copy(fdst, nil, dstFileName, dst) + return Copy(ctx, fdst, nil, dstFileName, dst) } return dst, nil } // PublicLink adds a "readable by anyone with link" permission on the given file or folder. -func PublicLink(f fs.Fs, remote string) (string, error) { +func PublicLink(ctx context.Context, f fs.Fs, remote string) (string, error) { doPublicLink := f.Features().PublicLink if doPublicLink == nil { return "", errors.Errorf("%v doesn't support public links", f) } - return doPublicLink(remote) + return doPublicLink(ctx, remote) } // Rmdirs removes any empty directories (or directories only // containing empty directories) under f, including f. 
-func Rmdirs(f fs.Fs, dir string, leaveRoot bool) error { +func Rmdirs(ctx context.Context, f fs.Fs, dir string, leaveRoot bool) error { dirEmpty := make(map[string]bool) dirEmpty[dir] = !leaveRoot - err := walk.Walk(f, dir, true, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error { + err := walk.Walk(ctx, f, dir, true, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error { if err != nil { fs.CountError(err) fs.Errorf(f, "Failed to list %q: %v", dirPath, err) @@ -1340,7 +1340,7 @@ func Rmdirs(f fs.Fs, dir string, leaveRoot bool) error { sort.Strings(toDelete) for i := len(toDelete) - 1; i >= 0; i-- { dir := toDelete[i] - err := TryRmdir(f, dir) + err := TryRmdir(ctx, f, dir) if err != nil { fs.CountError(err) fs.Errorf(dir, "Failed to rmdir: %v", err) @@ -1355,7 +1355,7 @@ func Rmdirs(f fs.Fs, dir string, leaveRoot bool) error { // // Returns a flag which indicates whether the file needs to be // transferred or not. -func NeedTransfer(dst, src fs.Object) bool { +func NeedTransfer(ctx context.Context, dst, src fs.Object) bool { if dst == nil { fs.Debugf(src, "Couldn't find file - need to transfer") return true @@ -1372,8 +1372,8 @@ func NeedTransfer(dst, src fs.Object) bool { } // If UpdateOlder is in effect, skip if dst is newer than src if fs.Config.UpdateOlder { - srcModTime := src.ModTime() - dstModTime := dst.ModTime() + srcModTime := src.ModTime(ctx) + dstModTime := dst.ModTime(ctx) dt := dstModTime.Sub(srcModTime) // If have a mutually agreed precision then use that modifyWindow := fs.GetModifyWindow(dst.Fs(), src.Fs()) @@ -1398,7 +1398,7 @@ func NeedTransfer(dst, src fs.Object) bool { } } else { // Check to see if changed or not - if Equal(src, dst) { + if Equal(ctx, src, dst) { fs.Debugf(src, "Unchanged skipping") return false } @@ -1408,7 +1408,7 @@ func NeedTransfer(dst, src fs.Object) bool { // RcatSize reads data from the Reader until EOF and uploads it to a file on remote. 
// Pass in size >=0 if known, <0 if not known -func RcatSize(fdst fs.Fs, dstFileName string, in io.ReadCloser, size int64, modTime time.Time) (dst fs.Object, err error) { +func RcatSize(ctx context.Context, fdst fs.Fs, dstFileName string, in io.ReadCloser, size int64, modTime time.Time) (dst fs.Object, err error) { var obj fs.Object if size >= 0 { @@ -1434,7 +1434,7 @@ func RcatSize(fdst fs.Fs, dstFileName string, in io.ReadCloser, size int64, modT accounting.Stats.DoneTransferring(dstFileName, err == nil) }() info := object.NewStaticObjectInfo(dstFileName, modTime, size, true, nil, fdst) - obj, err = fdst.Put(in, info) + obj, err = fdst.Put(ctx, in, info) if err != nil { fs.Errorf(dstFileName, "Post request put error: %v", err) @@ -1442,7 +1442,7 @@ func RcatSize(fdst fs.Fs, dstFileName string, in io.ReadCloser, size int64, modT } } else { // Size unknown use Rcat - obj, err = Rcat(fdst, dstFileName, in, modTime) + obj, err = Rcat(ctx, fdst, dstFileName, in, modTime) if err != nil { fs.Errorf(dstFileName, "Post request rcat error: %v", err) @@ -1454,7 +1454,7 @@ func RcatSize(fdst fs.Fs, dstFileName string, in io.ReadCloser, size int64, modT } // CopyURL copies the data from the url to (fdst, dstFileName) -func CopyURL(fdst fs.Fs, dstFileName string, url string) (dst fs.Object, err error) { +func CopyURL(ctx context.Context, fdst fs.Fs, dstFileName string, url string) (dst fs.Object, err error) { client := fshttp.NewClient(fs.Config) resp, err := client.Get(url) @@ -1462,11 +1462,11 @@ func CopyURL(fdst fs.Fs, dstFileName string, url string) (dst fs.Object, err err return nil, err } defer fs.CheckClose(resp.Body, &err) - return RcatSize(fdst, dstFileName, resp.Body, resp.ContentLength, time.Now()) + return RcatSize(ctx, fdst, dstFileName, resp.Body, resp.ContentLength, time.Now()) } // moveOrCopyFile moves or copies a single file possibly to a new name -func moveOrCopyFile(fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string, cp bool) (err error) { +func 
moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string, cp bool) (err error) { dstFilePath := path.Join(fdst.Root(), dstFileName) srcFilePath := path.Join(fsrc.Root(), srcFileName) if fdst.Name() == fsrc.Name() && dstFilePath == srcFilePath { @@ -1481,13 +1481,13 @@ func moveOrCopyFile(fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName stri } // Find src object - srcObj, err := fsrc.NewObject(srcFileName) + srcObj, err := fsrc.NewObject(ctx, srcFileName) if err != nil { return err } // Find dst object if it exists - dstObj, err := fdst.NewObject(dstFileName) + dstObj, err := fdst.NewObject(ctx, dstFileName) if err == fs.ErrorObjectNotFound { dstObj = nil } else if err != nil { @@ -1501,7 +1501,7 @@ func moveOrCopyFile(fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName stri if !cp && fdst.Name() == fsrc.Name() && fdst.Features().CaseInsensitive && dstFileName != srcFileName && strings.ToLower(dstFilePath) == strings.ToLower(srcFilePath) { // Create random name to temporarily move file to tmpObjName := dstFileName + "-rclone-move-" + random(8) - _, err := fdst.NewObject(tmpObjName) + _, err := fdst.NewObject(ctx, tmpObjName) if err != fs.ErrorObjectNotFound { if err == nil { return errors.New("found an already existing file with a randomly generated name. 
Try the operation again") @@ -1509,17 +1509,17 @@ func moveOrCopyFile(fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName stri return errors.Wrap(err, "error while attempting to move file to a temporary location") } accounting.Stats.Transferring(srcFileName) - tmpObj, err := Op(fdst, nil, tmpObjName, srcObj) + tmpObj, err := Op(ctx, fdst, nil, tmpObjName, srcObj) if err != nil { accounting.Stats.DoneTransferring(srcFileName, false) return errors.Wrap(err, "error while moving file to temporary location") } - _, err = Op(fdst, nil, dstFileName, tmpObj) + _, err = Op(ctx, fdst, nil, dstFileName, tmpObj) accounting.Stats.DoneTransferring(srcFileName, err == nil) return err } - if NeedTransfer(dstObj, srcObj) { + if NeedTransfer(ctx, dstObj, srcObj) { // If destination already exists, then we must move it into --backup-dir if required if dstObj != nil && fs.Config.BackupDir != "" { backupDir, err := cache.Get(fs.Config.BackupDir) @@ -1527,8 +1527,8 @@ func moveOrCopyFile(fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName stri return errors.Wrap(err, "creating Fs for --backup-dir failed") } remoteWithSuffix := SuffixName(dstObj.Remote()) - overwritten, _ := backupDir.NewObject(remoteWithSuffix) - _, err = Move(backupDir, overwritten, remoteWithSuffix, dstObj) + overwritten, _ := backupDir.NewObject(ctx, remoteWithSuffix) + _, err = Move(ctx, backupDir, overwritten, remoteWithSuffix, dstObj) if err != nil { return errors.Wrap(err, "moving to --backup-dir failed") } @@ -1536,11 +1536,11 @@ func moveOrCopyFile(fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName stri dstObj = nil } - _, err = Op(fdst, dstObj, dstFileName, srcObj) + _, err = Op(ctx, fdst, dstObj, dstFileName, srcObj) } else { accounting.Stats.Checking(srcFileName) if !cp { - err = DeleteFile(srcObj) + err = DeleteFile(ctx, srcObj) } defer accounting.Stats.DoneChecking(srcFileName) } @@ -1559,18 +1559,18 @@ func random(length int) string { } // MoveFile moves a single file possibly to a new name 
-func MoveFile(fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string) (err error) { - return moveOrCopyFile(fdst, fsrc, dstFileName, srcFileName, false) +func MoveFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string) (err error) { + return moveOrCopyFile(ctx, fdst, fsrc, dstFileName, srcFileName, false) } // CopyFile moves a single file possibly to a new name -func CopyFile(fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string) (err error) { - return moveOrCopyFile(fdst, fsrc, dstFileName, srcFileName, true) +func CopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string) (err error) { + return moveOrCopyFile(ctx, fdst, fsrc, dstFileName, srcFileName, true) } // SetTier changes tier of object in remote -func SetTier(fsrc fs.Fs, tier string) error { - return ListFn(fsrc, func(o fs.Object) { +func SetTier(ctx context.Context, fsrc fs.Fs, tier string) error { + return ListFn(ctx, fsrc, func(o fs.Object) { objImpl, ok := o.(fs.SetTierer) if !ok { fs.Errorf(fsrc, "Remote object does not implement SetTier") @@ -1732,14 +1732,14 @@ func (l *ListFormat) Format(entry *ListJSONItem) (result string) { // // It does this by loading the directory tree into memory (using ListR // if available) and doing renames in parallel. 
-func DirMove(f fs.Fs, srcRemote, dstRemote string) (err error) { +func DirMove(ctx context.Context, f fs.Fs, srcRemote, dstRemote string) (err error) { // Use DirMove if possible if doDirMove := f.Features().DirMove; doDirMove != nil { - return doDirMove(f, srcRemote, dstRemote) + return doDirMove(ctx, f, srcRemote, dstRemote) } // Load the directory tree into memory - tree, err := walk.NewDirTree(f, srcRemote, true, -1) + tree, err := walk.NewDirTree(ctx, f, srcRemote, true, -1) if err != nil { return errors.Wrap(err, "RenameDir tree walk") } @@ -1750,7 +1750,7 @@ func DirMove(f fs.Fs, srcRemote, dstRemote string) (err error) { // Make the destination directories - must be done in order not in parallel for _, dir := range dirs { dstPath := dstRemote + dir[len(srcRemote):] - err := f.Mkdir(dstPath) + err := f.Mkdir(ctx, dstPath) if err != nil { return errors.Wrap(err, "RenameDir mkdir") } @@ -1766,8 +1766,8 @@ func DirMove(f fs.Fs, srcRemote, dstRemote string) (err error) { for i := 0; i < fs.Config.Transfers; i++ { g.Go(func() error { for job := range renames { - dstOverwritten, _ := f.NewObject(job.newPath) - _, err := Move(f, dstOverwritten, job.newPath, job.o) + dstOverwritten, _ := f.NewObject(ctx, job.newPath) + _, err := Move(ctx, f, dstOverwritten, job.newPath, job.o) if err != nil { return err } @@ -1797,7 +1797,7 @@ func DirMove(f fs.Fs, srcRemote, dstRemote string) (err error) { // Remove the source directories in reverse order for i := len(dirs) - 1; i >= 0; i-- { - err := f.Rmdir(dirs[i]) + err := f.Rmdir(ctx, dirs[i]) if err != nil { return errors.Wrap(err, "RenameDir rmdir") } diff --git a/fs/operations/operations_test.go b/fs/operations/operations_test.go index 8634b5618..de28661a8 100644 --- a/fs/operations/operations_test.go +++ b/fs/operations/operations_test.go @@ -21,6 +21,7 @@ package operations_test import ( "bytes" + "context" "errors" "fmt" "io" @@ -62,23 +63,23 @@ func TestMkdir(t *testing.T) { r := fstest.NewRun(t) defer r.Finalise() - 
err := operations.Mkdir(r.Fremote, "") + err := operations.Mkdir(context.Background(), r.Fremote, "") require.NoError(t, err) fstest.CheckListing(t, r.Fremote, []fstest.Item{}) - err = operations.Mkdir(r.Fremote, "") + err = operations.Mkdir(context.Background(), r.Fremote, "") require.NoError(t, err) } func TestLsd(t *testing.T) { r := fstest.NewRun(t) defer r.Finalise() - file1 := r.WriteObject("sub dir/hello world", "hello world", t1) + file1 := r.WriteObject(context.Background(), "sub dir/hello world", "hello world", t1) fstest.CheckItems(t, r.Fremote, file1) var buf bytes.Buffer - err := operations.ListDir(r.Fremote, &buf) + err := operations.ListDir(context.Background(), r.Fremote, &buf) require.NoError(t, err) res := buf.String() assert.Contains(t, res, "sub dir\n") @@ -87,13 +88,13 @@ func TestLsd(t *testing.T) { func TestLs(t *testing.T) { r := fstest.NewRun(t) defer r.Finalise() - file1 := r.WriteBoth("potato2", "------------------------------------------------------------", t1) - file2 := r.WriteBoth("empty space", "", t2) + file1 := r.WriteBoth(context.Background(), "potato2", "------------------------------------------------------------", t1) + file2 := r.WriteBoth(context.Background(), "empty space", "", t2) fstest.CheckItems(t, r.Fremote, file1, file2) var buf bytes.Buffer - err := operations.List(r.Fremote, &buf) + err := operations.List(context.Background(), r.Fremote, &buf) require.NoError(t, err) res := buf.String() assert.Contains(t, res, " 0 empty space\n") @@ -103,8 +104,8 @@ func TestLs(t *testing.T) { func TestLsWithFilesFrom(t *testing.T) { r := fstest.NewRun(t) defer r.Finalise() - file1 := r.WriteBoth("potato2", "------------------------------------------------------------", t1) - file2 := r.WriteBoth("empty space", "", t2) + file1 := r.WriteBoth(context.Background(), "potato2", "------------------------------------------------------------", t1) + file2 := r.WriteBoth(context.Background(), "empty space", "", t2) fstest.CheckItems(t, 
r.Fremote, file1, file2) @@ -122,7 +123,7 @@ func TestLsWithFilesFrom(t *testing.T) { }() var buf bytes.Buffer - err = operations.List(r.Fremote, &buf) + err = operations.List(context.Background(), r.Fremote, &buf) require.NoError(t, err) assert.Equal(t, " 60 potato2\n", buf.String()) @@ -134,7 +135,7 @@ func TestLsWithFilesFrom(t *testing.T) { }() buf.Reset() - err = operations.List(r.Fremote, &buf) + err = operations.List(context.Background(), r.Fremote, &buf) require.NoError(t, err) assert.Equal(t, " 60 potato2\n", buf.String()) } @@ -142,13 +143,13 @@ func TestLsWithFilesFrom(t *testing.T) { func TestLsLong(t *testing.T) { r := fstest.NewRun(t) defer r.Finalise() - file1 := r.WriteBoth("potato2", "------------------------------------------------------------", t1) - file2 := r.WriteBoth("empty space", "", t2) + file1 := r.WriteBoth(context.Background(), "potato2", "------------------------------------------------------------", t1) + file2 := r.WriteBoth(context.Background(), "empty space", "", t2) fstest.CheckItems(t, r.Fremote, file1, file2) var buf bytes.Buffer - err := operations.ListLong(r.Fremote, &buf) + err := operations.ListLong(context.Background(), r.Fremote, &buf) require.NoError(t, err) res := buf.String() lines := strings.Split(strings.Trim(res, "\n"), "\n") @@ -187,15 +188,15 @@ func TestLsLong(t *testing.T) { func TestHashSums(t *testing.T) { r := fstest.NewRun(t) defer r.Finalise() - file1 := r.WriteBoth("potato2", "------------------------------------------------------------", t1) - file2 := r.WriteBoth("empty space", "", t2) + file1 := r.WriteBoth(context.Background(), "potato2", "------------------------------------------------------------", t1) + file2 := r.WriteBoth(context.Background(), "empty space", "", t2) fstest.CheckItems(t, r.Fremote, file1, file2) // MD5 Sum var buf bytes.Buffer - err := operations.Md5sum(r.Fremote, &buf) + err := operations.Md5sum(context.Background(), r.Fremote, &buf) require.NoError(t, err) res := buf.String() if 
!strings.Contains(res, "d41d8cd98f00b204e9800998ecf8427e empty space\n") && @@ -212,7 +213,7 @@ func TestHashSums(t *testing.T) { // SHA1 Sum buf.Reset() - err = operations.Sha1sum(r.Fremote, &buf) + err = operations.Sha1sum(context.Background(), r.Fremote, &buf) require.NoError(t, err) res = buf.String() if !strings.Contains(res, "da39a3ee5e6b4b0d3255bfef95601890afd80709 empty space\n") && @@ -229,7 +230,7 @@ func TestHashSums(t *testing.T) { // Dropbox Hash Sum buf.Reset() - err = operations.DropboxHashSum(r.Fremote, &buf) + err = operations.DropboxHashSum(context.Background(), r.Fremote, &buf) require.NoError(t, err) res = buf.String() if !strings.Contains(res, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 empty space\n") && @@ -274,9 +275,9 @@ func TestSuffixName(t *testing.T) { func TestCount(t *testing.T) { r := fstest.NewRun(t) defer r.Finalise() - file1 := r.WriteBoth("potato2", "------------------------------------------------------------", t1) - file2 := r.WriteBoth("empty space", "", t2) - file3 := r.WriteBoth("sub dir/potato3", "hello", t2) + file1 := r.WriteBoth(context.Background(), "potato2", "------------------------------------------------------------", t1) + file2 := r.WriteBoth(context.Background(), "empty space", "", t2) + file3 := r.WriteBoth(context.Background(), "sub dir/potato3", "hello", t2) fstest.CheckItems(t, r.Fremote, file1, file2, file3) @@ -284,7 +285,7 @@ func TestCount(t *testing.T) { fs.Config.MaxDepth = 1 defer func() { fs.Config.MaxDepth = -1 }() - objects, size, err := operations.Count(r.Fremote) + objects, size, err := operations.Count(context.Background(), r.Fremote) require.NoError(t, err) assert.Equal(t, int64(2), objects) assert.Equal(t, int64(60), size) @@ -293,9 +294,9 @@ func TestCount(t *testing.T) { func TestDelete(t *testing.T) { r := fstest.NewRun(t) defer r.Finalise() - file1 := r.WriteObject("small", "1234567890", t2) // 10 bytes - file2 := r.WriteObject("medium", 
"------------------------------------------------------------", t1) // 60 bytes - file3 := r.WriteObject("large", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", t1) // 100 bytes + file1 := r.WriteObject(context.Background(), "small", "1234567890", t2) // 10 bytes + file2 := r.WriteObject(context.Background(), "medium", "------------------------------------------------------------", t1) // 60 bytes + file3 := r.WriteObject(context.Background(), "large", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", t1) // 100 bytes fstest.CheckItems(t, r.Fremote, file1, file2, file3) filter.Active.Opt.MaxSize = 60 @@ -303,12 +304,12 @@ func TestDelete(t *testing.T) { filter.Active.Opt.MaxSize = -1 }() - err := operations.Delete(r.Fremote) + err := operations.Delete(context.Background(), r.Fremote) require.NoError(t, err) fstest.CheckItems(t, r.Fremote, file3) } -func testCheck(t *testing.T, checkFunction func(fdst, fsrc fs.Fs, oneway bool) error) { +func testCheck(t *testing.T, checkFunction func(ctx context.Context, fdst, fsrc fs.Fs, oneway bool) error) { r := fstest.NewRun(t) defer r.Finalise() @@ -320,7 +321,7 @@ func testCheck(t *testing.T, checkFunction func(fdst, fsrc fs.Fs, oneway bool) e defer func() { log.SetOutput(os.Stderr) }() - err := checkFunction(r.Fremote, r.Flocal, oneway) + err := checkFunction(context.Background(), r.Fremote, r.Flocal, oneway) gotErrors := accounting.Stats.GetErrors() gotChecks := accounting.Stats.GetChecks() if wantErrors == 0 && err != nil { @@ -341,7 +342,7 @@ func testCheck(t *testing.T, checkFunction func(fdst, fsrc fs.Fs, oneway bool) e fs.Debugf(r.Fremote, "%d: Ending check test", i) } - file1 := r.WriteBoth("rutabaga", "is tasty", t3) + file1 := r.WriteBoth(context.Background(), "rutabaga", "is tasty", t3) fstest.CheckItems(t, r.Fremote, file1) fstest.CheckItems(t, r.Flocal, file1) check(1, 0, 1, false) @@ -350,15 +351,15 @@ 
func testCheck(t *testing.T, checkFunction func(fdst, fsrc fs.Fs, oneway bool) e fstest.CheckItems(t, r.Flocal, file1, file2) check(2, 1, 1, false) - file3 := r.WriteObject("empty space", "", t2) + file3 := r.WriteObject(context.Background(), "empty space", "", t2) fstest.CheckItems(t, r.Fremote, file1, file3) check(3, 2, 1, false) file2r := file2 if fs.Config.SizeOnly { - file2r = r.WriteObject("potato2", "--Some-Differences-But-Size-Only-Is-Enabled-----------------", t1) + file2r = r.WriteObject(context.Background(), "potato2", "--Some-Differences-But-Size-Only-Is-Enabled-----------------", t1) } else { - r.WriteObject("potato2", "------------------------------------------------------------", t1) + r.WriteObject(context.Background(), "potato2", "------------------------------------------------------------", t1) } fstest.CheckItems(t, r.Fremote, file1, file2r, file3) check(4, 1, 2, false) @@ -367,7 +368,7 @@ func testCheck(t *testing.T, checkFunction func(fdst, fsrc fs.Fs, oneway bool) e fstest.CheckItems(t, r.Flocal, file1, file2, file3) check(5, 0, 3, false) - file4 := r.WriteObject("remotepotato", "------------------------------------------------------------", t1) + file4 := r.WriteObject(context.Background(), "remotepotato", "------------------------------------------------------------", t1) fstest.CheckItems(t, r.Fremote, file1, file2r, file3, file4) check(6, 1, 3, false) check(7, 0, 3, true) @@ -390,8 +391,8 @@ func TestCheckSizeOnly(t *testing.T) { func TestCat(t *testing.T) { r := fstest.NewRun(t) defer r.Finalise() - file1 := r.WriteBoth("file1", "ABCDEFGHIJ", t1) - file2 := r.WriteBoth("file2", "012345678", t2) + file1 := r.WriteBoth(context.Background(), "file1", "ABCDEFGHIJ", t1) + file2 := r.WriteBoth(context.Background(), "file2", "012345678", t2) fstest.CheckItems(t, r.Fremote, file1, file2) @@ -407,7 +408,7 @@ func TestCat(t *testing.T) { {1, 3, "BCD", "123"}, } { var buf bytes.Buffer - err := operations.Cat(r.Fremote, &buf, test.offset, 
test.count) + err := operations.Cat(context.Background(), r.Fremote, &buf, test.offset, test.count) require.NoError(t, err) res := buf.String() @@ -440,11 +441,11 @@ func TestRcat(t *testing.T) { path2 := prefix + "big_file_from_pipe" in := ioutil.NopCloser(strings.NewReader(data1)) - _, err := operations.Rcat(r.Fremote, path1, in, t1) + _, err := operations.Rcat(context.Background(), r.Fremote, path1, in, t1) require.NoError(t, err) in = ioutil.NopCloser(strings.NewReader(data2)) - _, err = operations.Rcat(r.Fremote, path2, in, t2) + _, err = operations.Rcat(context.Background(), r.Fremote, path2, in, t2) require.NoError(t, err) file1 := fstest.NewItem(path1, data1, t1) @@ -459,21 +460,21 @@ func TestRcat(t *testing.T) { func TestPurge(t *testing.T) { r := fstest.NewRunIndividual(t) // make new container (azureblob has delayed mkdir after rmdir) defer r.Finalise() - r.Mkdir(r.Fremote) + r.Mkdir(context.Background(), r.Fremote) // Make some files and dirs - r.ForceMkdir(r.Fremote) - file1 := r.WriteObject("A1/B1/C1/one", "aaa", t1) + r.ForceMkdir(context.Background(), r.Fremote) + file1 := r.WriteObject(context.Background(), "A1/B1/C1/one", "aaa", t1) //..and dirs we expect to delete - require.NoError(t, operations.Mkdir(r.Fremote, "A2")) - require.NoError(t, operations.Mkdir(r.Fremote, "A1/B2")) - require.NoError(t, operations.Mkdir(r.Fremote, "A1/B2/C2")) - require.NoError(t, operations.Mkdir(r.Fremote, "A1/B1/C3")) - require.NoError(t, operations.Mkdir(r.Fremote, "A3")) - require.NoError(t, operations.Mkdir(r.Fremote, "A3/B3")) - require.NoError(t, operations.Mkdir(r.Fremote, "A3/B3/C4")) + require.NoError(t, operations.Mkdir(context.Background(), r.Fremote, "A2")) + require.NoError(t, operations.Mkdir(context.Background(), r.Fremote, "A1/B2")) + require.NoError(t, operations.Mkdir(context.Background(), r.Fremote, "A1/B2/C2")) + require.NoError(t, operations.Mkdir(context.Background(), r.Fremote, "A1/B1/C3")) + require.NoError(t, 
operations.Mkdir(context.Background(), r.Fremote, "A3")) + require.NoError(t, operations.Mkdir(context.Background(), r.Fremote, "A3/B3")) + require.NoError(t, operations.Mkdir(context.Background(), r.Fremote, "A3/B3/C4")) //..and one more file at the end - file2 := r.WriteObject("A1/two", "bbb", t2) + file2 := r.WriteObject(context.Background(), "A1/two", "bbb", t2) fstest.CheckListingWithPrecision( t, @@ -496,7 +497,7 @@ func TestPurge(t *testing.T) { fs.GetModifyWindow(r.Fremote), ) - require.NoError(t, operations.Purge(r.Fremote, "A1/B1")) + require.NoError(t, operations.Purge(context.Background(), r.Fremote, "A1/B1")) fstest.CheckListingWithPrecision( t, @@ -516,7 +517,7 @@ func TestPurge(t *testing.T) { fs.GetModifyWindow(r.Fremote), ) - require.NoError(t, operations.Purge(r.Fremote, "")) + require.NoError(t, operations.Purge(context.Background(), r.Fremote, "")) fstest.CheckListingWithPrecision( t, @@ -531,21 +532,21 @@ func TestPurge(t *testing.T) { func TestRmdirsNoLeaveRoot(t *testing.T) { r := fstest.NewRun(t) defer r.Finalise() - r.Mkdir(r.Fremote) + r.Mkdir(context.Background(), r.Fremote) // Make some files and dirs we expect to keep - r.ForceMkdir(r.Fremote) - file1 := r.WriteObject("A1/B1/C1/one", "aaa", t1) + r.ForceMkdir(context.Background(), r.Fremote) + file1 := r.WriteObject(context.Background(), "A1/B1/C1/one", "aaa", t1) //..and dirs we expect to delete - require.NoError(t, operations.Mkdir(r.Fremote, "A2")) - require.NoError(t, operations.Mkdir(r.Fremote, "A1/B2")) - require.NoError(t, operations.Mkdir(r.Fremote, "A1/B2/C2")) - require.NoError(t, operations.Mkdir(r.Fremote, "A1/B1/C3")) - require.NoError(t, operations.Mkdir(r.Fremote, "A3")) - require.NoError(t, operations.Mkdir(r.Fremote, "A3/B3")) - require.NoError(t, operations.Mkdir(r.Fremote, "A3/B3/C4")) + require.NoError(t, operations.Mkdir(context.Background(), r.Fremote, "A2")) + require.NoError(t, operations.Mkdir(context.Background(), r.Fremote, "A1/B2")) + require.NoError(t, 
operations.Mkdir(context.Background(), r.Fremote, "A1/B2/C2")) + require.NoError(t, operations.Mkdir(context.Background(), r.Fremote, "A1/B1/C3")) + require.NoError(t, operations.Mkdir(context.Background(), r.Fremote, "A3")) + require.NoError(t, operations.Mkdir(context.Background(), r.Fremote, "A3/B3")) + require.NoError(t, operations.Mkdir(context.Background(), r.Fremote, "A3/B3/C4")) //..and one more file at the end - file2 := r.WriteObject("A1/two", "bbb", t2) + file2 := r.WriteObject(context.Background(), "A1/two", "bbb", t2) fstest.CheckListingWithPrecision( t, @@ -568,7 +569,7 @@ func TestRmdirsNoLeaveRoot(t *testing.T) { fs.GetModifyWindow(r.Fremote), ) - require.NoError(t, operations.Rmdirs(r.Fremote, "A3/B3/C4", false)) + require.NoError(t, operations.Rmdirs(context.Background(), r.Fremote, "A3/B3/C4", false)) fstest.CheckListingWithPrecision( t, @@ -590,7 +591,7 @@ func TestRmdirsNoLeaveRoot(t *testing.T) { fs.GetModifyWindow(r.Fremote), ) - require.NoError(t, operations.Rmdirs(r.Fremote, "", false)) + require.NoError(t, operations.Rmdirs(context.Background(), r.Fremote, "", false)) fstest.CheckListingWithPrecision( t, @@ -611,13 +612,13 @@ func TestRmdirsNoLeaveRoot(t *testing.T) { func TestRmdirsLeaveRoot(t *testing.T) { r := fstest.NewRun(t) defer r.Finalise() - r.Mkdir(r.Fremote) + r.Mkdir(context.Background(), r.Fremote) - r.ForceMkdir(r.Fremote) + r.ForceMkdir(context.Background(), r.Fremote) - require.NoError(t, operations.Mkdir(r.Fremote, "A1")) - require.NoError(t, operations.Mkdir(r.Fremote, "A1/B1")) - require.NoError(t, operations.Mkdir(r.Fremote, "A1/B1/C1")) + require.NoError(t, operations.Mkdir(context.Background(), r.Fremote, "A1")) + require.NoError(t, operations.Mkdir(context.Background(), r.Fremote, "A1/B1")) + require.NoError(t, operations.Mkdir(context.Background(), r.Fremote, "A1/B1/C1")) fstest.CheckListingWithPrecision( t, @@ -631,7 +632,7 @@ func TestRmdirsLeaveRoot(t *testing.T) { fs.GetModifyWindow(r.Fremote), ) - 
require.NoError(t, operations.Rmdirs(r.Fremote, "A1", true)) + require.NoError(t, operations.Rmdirs(context.Background(), r.Fremote, "A1", true)) fstest.CheckListingWithPrecision( t, @@ -653,7 +654,7 @@ func TestRcatSize(t *testing.T) { file2 := r.WriteFile("potato2", body, t2) // Test with known length bodyReader := ioutil.NopCloser(strings.NewReader(body)) - obj, err := operations.RcatSize(r.Fremote, file1.Path, bodyReader, int64(len(body)), file1.ModTime) + obj, err := operations.RcatSize(context.Background(), r.Fremote, file1.Path, bodyReader, int64(len(body)), file1.ModTime) require.NoError(t, err) assert.Equal(t, int64(len(body)), obj.Size()) assert.Equal(t, file1.Path, obj.Remote()) @@ -661,7 +662,7 @@ func TestRcatSize(t *testing.T) { // Test with unknown length bodyReader = ioutil.NopCloser(strings.NewReader(body)) // reset Reader ioutil.NopCloser(strings.NewReader(body)) - obj, err = operations.RcatSize(r.Fremote, file2.Path, bodyReader, -1, file2.ModTime) + obj, err = operations.RcatSize(context.Background(), r.Fremote, file2.Path, bodyReader, -1, file2.ModTime) require.NoError(t, err) assert.Equal(t, int64(len(body)), obj.Size()) assert.Equal(t, file2.Path, obj.Remote()) @@ -677,7 +678,7 @@ func TestCopyURL(t *testing.T) { contents := "file contents\n" file1 := r.WriteFile("file1", contents, t1) file2 := r.WriteFile("file2", contents, t1) - r.Mkdir(r.Fremote) + r.Mkdir(context.Background(), r.Fremote) fstest.CheckItems(t, r.Fremote) // check when reading from regular HTTP server @@ -688,7 +689,7 @@ func TestCopyURL(t *testing.T) { ts := httptest.NewServer(handler) defer ts.Close() - o, err := operations.CopyURL(r.Fremote, "file1", ts.URL) + o, err := operations.CopyURL(context.Background(), r.Fremote, "file1", ts.URL) require.NoError(t, err) assert.Equal(t, int64(len(contents)), o.Size()) @@ -704,7 +705,7 @@ func TestCopyURL(t *testing.T) { tss := httptest.NewTLSServer(handler) defer tss.Close() - o, err = operations.CopyURL(r.Fremote, "file2", tss.URL) 
+ o, err = operations.CopyURL(context.Background(), r.Fremote, "file2", tss.URL) require.NoError(t, err) assert.Equal(t, int64(len(contents)), o.Size()) fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{file1, file2}, nil, fs.ModTimeNotSupported) @@ -720,7 +721,7 @@ func TestMoveFile(t *testing.T) { file2 := file1 file2.Path = "sub/file2" - err := operations.MoveFile(r.Fremote, r.Flocal, file2.Path, file1.Path) + err := operations.MoveFile(context.Background(), r.Fremote, r.Flocal, file2.Path, file1.Path) require.NoError(t, err) fstest.CheckItems(t, r.Flocal) fstest.CheckItems(t, r.Fremote, file2) @@ -728,12 +729,12 @@ func TestMoveFile(t *testing.T) { r.WriteFile("file1", "file1 contents", t1) fstest.CheckItems(t, r.Flocal, file1) - err = operations.MoveFile(r.Fremote, r.Flocal, file2.Path, file1.Path) + err = operations.MoveFile(context.Background(), r.Fremote, r.Flocal, file2.Path, file1.Path) require.NoError(t, err) fstest.CheckItems(t, r.Flocal) fstest.CheckItems(t, r.Fremote, file2) - err = operations.MoveFile(r.Fremote, r.Fremote, file2.Path, file2.Path) + err = operations.MoveFile(context.Background(), r.Fremote, r.Fremote, file2.Path, file2.Path) require.NoError(t, err) fstest.CheckItems(t, r.Flocal) fstest.CheckItems(t, r.Fremote, file2) @@ -752,7 +753,7 @@ func TestCaseInsensitiveMoveFile(t *testing.T) { file2 := file1 file2.Path = "sub/file2" - err := operations.MoveFile(r.Fremote, r.Flocal, file2.Path, file1.Path) + err := operations.MoveFile(context.Background(), r.Fremote, r.Flocal, file2.Path, file1.Path) require.NoError(t, err) fstest.CheckItems(t, r.Flocal) fstest.CheckItems(t, r.Fremote, file2) @@ -760,7 +761,7 @@ func TestCaseInsensitiveMoveFile(t *testing.T) { r.WriteFile("file1", "file1 contents", t1) fstest.CheckItems(t, r.Flocal, file1) - err = operations.MoveFile(r.Fremote, r.Flocal, file2.Path, file1.Path) + err = operations.MoveFile(context.Background(), r.Fremote, r.Flocal, file2.Path, file1.Path) require.NoError(t, err) 
fstest.CheckItems(t, r.Flocal) fstest.CheckItems(t, r.Fremote, file2) @@ -768,7 +769,7 @@ func TestCaseInsensitiveMoveFile(t *testing.T) { file2Capitalized := file2 file2Capitalized.Path = "sub/File2" - err = operations.MoveFile(r.Fremote, r.Fremote, file2Capitalized.Path, file2.Path) + err = operations.MoveFile(context.Background(), r.Fremote, r.Fremote, file2Capitalized.Path, file2.Path) require.NoError(t, err) fstest.CheckItems(t, r.Flocal) fstest.CheckItems(t, r.Fremote, file2Capitalized) @@ -787,10 +788,10 @@ func TestMoveFileBackupDir(t *testing.T) { file1 := r.WriteFile("dst/file1", "file1 contents", t1) fstest.CheckItems(t, r.Flocal, file1) - file1old := r.WriteObject("dst/file1", "file1 contents old", t1) + file1old := r.WriteObject(context.Background(), "dst/file1", "file1 contents old", t1) fstest.CheckItems(t, r.Fremote, file1old) - err := operations.MoveFile(r.Fremote, r.Flocal, file1.Path, file1.Path) + err := operations.MoveFile(context.Background(), r.Fremote, r.Flocal, file1.Path, file1.Path) require.NoError(t, err) fstest.CheckItems(t, r.Flocal) file1old.Path = "backup/dst/file1" @@ -807,17 +808,17 @@ func TestCopyFile(t *testing.T) { file2 := file1 file2.Path = "sub/file2" - err := operations.CopyFile(r.Fremote, r.Flocal, file2.Path, file1.Path) + err := operations.CopyFile(context.Background(), r.Fremote, r.Flocal, file2.Path, file1.Path) require.NoError(t, err) fstest.CheckItems(t, r.Flocal, file1) fstest.CheckItems(t, r.Fremote, file2) - err = operations.CopyFile(r.Fremote, r.Flocal, file2.Path, file1.Path) + err = operations.CopyFile(context.Background(), r.Fremote, r.Flocal, file2.Path, file1.Path) require.NoError(t, err) fstest.CheckItems(t, r.Flocal, file1) fstest.CheckItems(t, r.Fremote, file2) - err = operations.CopyFile(r.Fremote, r.Fremote, file2.Path, file2.Path) + err = operations.CopyFile(context.Background(), r.Fremote, r.Fremote, file2.Path, file2.Path) require.NoError(t, err) fstest.CheckItems(t, r.Flocal, file1) 
fstest.CheckItems(t, r.Fremote, file2) @@ -836,10 +837,10 @@ func TestCopyFileBackupDir(t *testing.T) { file1 := r.WriteFile("dst/file1", "file1 contents", t1) fstest.CheckItems(t, r.Flocal, file1) - file1old := r.WriteObject("dst/file1", "file1 contents old", t1) + file1old := r.WriteObject(context.Background(), "dst/file1", "file1 contents old", t1) fstest.CheckItems(t, r.Fremote, file1old) - err := operations.CopyFile(r.Fremote, r.Flocal, file1.Path, file1.Path) + err := operations.CopyFile(context.Background(), r.Fremote, r.Flocal, file1.Path, file1.Path) require.NoError(t, err) fstest.CheckItems(t, r.Flocal, file1) file1old.Path = "backup/dst/file1" @@ -1135,19 +1136,19 @@ func TestDirMove(t *testing.T) { r := fstest.NewRun(t) defer r.Finalise() - r.Mkdir(r.Fremote) + r.Mkdir(context.Background(), r.Fremote) // Make some files and dirs - r.ForceMkdir(r.Fremote) + r.ForceMkdir(context.Background(), r.Fremote) files := []fstest.Item{ - r.WriteObject("A1/one", "one", t1), - r.WriteObject("A1/two", "two", t2), - r.WriteObject("A1/B1/three", "three", t3), - r.WriteObject("A1/B1/C1/four", "four", t1), - r.WriteObject("A1/B1/C2/five", "five", t2), + r.WriteObject(context.Background(), "A1/one", "one", t1), + r.WriteObject(context.Background(), "A1/two", "two", t2), + r.WriteObject(context.Background(), "A1/B1/three", "three", t3), + r.WriteObject(context.Background(), "A1/B1/C1/four", "four", t1), + r.WriteObject(context.Background(), "A1/B1/C2/five", "five", t2), } - require.NoError(t, operations.Mkdir(r.Fremote, "A1/B2")) - require.NoError(t, operations.Mkdir(r.Fremote, "A1/B1/C3")) + require.NoError(t, operations.Mkdir(context.Background(), r.Fremote, "A1/B2")) + require.NoError(t, operations.Mkdir(context.Background(), r.Fremote, "A1/B1/C3")) fstest.CheckListingWithPrecision( t, @@ -1164,7 +1165,7 @@ func TestDirMove(t *testing.T) { fs.GetModifyWindow(r.Fremote), ) - require.NoError(t, operations.DirMove(r.Fremote, "A1", "A2")) + require.NoError(t, 
operations.DirMove(context.Background(), r.Fremote, "A1", "A2")) for i := range files { files[i].Path = strings.Replace(files[i].Path, "A1/", "A2/", -1) @@ -1194,7 +1195,7 @@ func TestDirMove(t *testing.T) { features.DirMove = oldDirMove }() - require.NoError(t, operations.DirMove(r.Fremote, "A2", "A3")) + require.NoError(t, operations.DirMove(context.Background(), r.Fremote, "A2", "A3")) for i := range files { files[i].Path = strings.Replace(files[i].Path, "A2/", "A3/", -1) diff --git a/fs/operations/rc.go b/fs/operations/rc.go index 351f7cccc..6dd39fe17 100644 --- a/fs/operations/rc.go +++ b/fs/operations/rc.go @@ -1,6 +1,7 @@ package operations import ( + "context" "strings" "github.com/ncw/rclone/fs" @@ -36,7 +37,7 @@ See the [lsjson command](/commands/rclone_lsjson/) for more information on the a } // List the directory -func rcList(in rc.Params) (out rc.Params, err error) { +func rcList(ctx context.Context, in rc.Params) (out rc.Params, err error) { f, remote, err := rc.GetFsAndRemote(in) if err != nil { return nil, err @@ -47,7 +48,7 @@ func rcList(in rc.Params) (out rc.Params, err error) { return nil, err } var list = []*ListJSONItem{} - err = ListJSON(f, remote, &opt, func(item *ListJSONItem) error { + err = ListJSON(ctx, f, remote, &opt, func(item *ListJSONItem) error { list = append(list, item) return nil }) @@ -77,7 +78,7 @@ See the [about command](/commands/rclone_size/) command for more information on } // About the remote -func rcAbout(in rc.Params) (out rc.Params, err error) { +func rcAbout(ctx context.Context, in rc.Params) (out rc.Params, err error) { f, err := rc.GetFs(in) if err != nil { return nil, err @@ -86,7 +87,7 @@ func rcAbout(in rc.Params) (out rc.Params, err error) { if doAbout == nil { return nil, errors.Errorf("%v doesn't support about", f) } - u, err := doAbout() + u, err := doAbout(ctx) if err != nil { return nil, errors.Wrap(err, "about call failed") } @@ -107,8 +108,8 @@ func init() { rc.Add(rc.Call{ Path: "operations/" + 
strings.ToLower(name) + "file", AuthRequired: true, - Fn: func(in rc.Params) (rc.Params, error) { - return rcMoveOrCopyFile(in, copy) + Fn: func(ctx context.Context, in rc.Params) (rc.Params, error) { + return rcMoveOrCopyFile(ctx, in, copy) }, Title: name + " a file from source remote to destination remote", Help: `This takes the following parameters @@ -123,7 +124,7 @@ func init() { } // Copy a file -func rcMoveOrCopyFile(in rc.Params, cp bool) (out rc.Params, err error) { +func rcMoveOrCopyFile(ctx context.Context, in rc.Params, cp bool) (out rc.Params, err error) { srcFs, srcRemote, err := rc.GetFsAndRemoteNamed(in, "srcFs", "srcRemote") if err != nil { return nil, err @@ -132,7 +133,7 @@ func rcMoveOrCopyFile(in rc.Params, cp bool) (out rc.Params, err error) { if err != nil { return nil, err } - return nil, moveOrCopyFile(dstFs, srcFs, dstRemote, srcRemote, cp) + return nil, moveOrCopyFile(ctx, dstFs, srcFs, dstRemote, srcRemote, cp) } func init() { @@ -159,8 +160,8 @@ func init() { rc.Add(rc.Call{ Path: "operations/" + op.name, AuthRequired: true, - Fn: func(in rc.Params) (rc.Params, error) { - return rcSingleCommand(in, op.name, op.noRemote) + Fn: func(ctx context.Context, in rc.Params) (rc.Params, error) { + return rcSingleCommand(ctx, in, op.name, op.noRemote) }, Title: op.title, Help: `This takes the following parameters @@ -174,7 +175,7 @@ See the [` + op.name + ` command](/commands/rclone_` + op.name + `/) command for } // Run a single command, eg Mkdir -func rcSingleCommand(in rc.Params, name string, noRemote bool) (out rc.Params, err error) { +func rcSingleCommand(ctx context.Context, in rc.Params, name string, noRemote bool) (out rc.Params, err error) { var ( f fs.Fs remote string @@ -189,34 +190,34 @@ func rcSingleCommand(in rc.Params, name string, noRemote bool) (out rc.Params, e } switch name { case "mkdir": - return nil, Mkdir(f, remote) + return nil, Mkdir(ctx, f, remote) case "rmdir": - return nil, Rmdir(f, remote) + return nil, Rmdir(ctx, f, 
remote) case "purge": - return nil, Purge(f, remote) + return nil, Purge(ctx, f, remote) case "rmdirs": leaveRoot, err := in.GetBool("leaveRoot") if rc.NotErrParamNotFound(err) { return nil, err } - return nil, Rmdirs(f, remote, leaveRoot) + return nil, Rmdirs(ctx, f, remote, leaveRoot) case "delete": - return nil, Delete(f) + return nil, Delete(ctx, f) case "deletefile": - o, err := f.NewObject(remote) + o, err := f.NewObject(ctx, remote) if err != nil { return nil, err } - return nil, DeleteFile(o) + return nil, DeleteFile(ctx, o) case "copyurl": url, err := in.GetString("url") if err != nil { return nil, err } - _, err = CopyURL(f, remote, url) + _, err = CopyURL(ctx, f, remote, url) return nil, err case "cleanup": - return nil, CleanUp(f) + return nil, CleanUp(ctx, f) } panic("unknown rcSingleCommand type") } @@ -242,12 +243,12 @@ See the [size command](/commands/rclone_size/) command for more information on t } // Size a directory -func rcSize(in rc.Params) (out rc.Params, err error) { +func rcSize(ctx context.Context, in rc.Params) (out rc.Params, err error) { f, err := rc.GetFs(in) if err != nil { return nil, err } - count, bytes, err := Count(f) + count, bytes, err := Count(ctx, f) if err != nil { return nil, err } @@ -278,12 +279,12 @@ See the [link command](/commands/rclone_link/) command for more information on t } // Make a public link -func rcPublicLink(in rc.Params) (out rc.Params, err error) { +func rcPublicLink(ctx context.Context, in rc.Params) (out rc.Params, err error) { f, remote, err := rc.GetFsAndRemote(in) if err != nil { return nil, err } - url, err := PublicLink(f, remote) + url, err := PublicLink(ctx, f, remote) if err != nil { return nil, err } @@ -357,7 +358,7 @@ This command does not have a command line equivalent so use this instead: } // Fsinfo the remote -func rcFsInfo(in rc.Params) (out rc.Params, err error) { +func rcFsInfo(ctx context.Context, in rc.Params) (out rc.Params, err error) { f, err := rc.GetFs(in) if err != nil { return 
nil, err diff --git a/fs/operations/rc_test.go b/fs/operations/rc_test.go index c3c16bfd2..8ea6c2415 100644 --- a/fs/operations/rc_test.go +++ b/fs/operations/rc_test.go @@ -1,6 +1,7 @@ package operations_test import ( + "context" "net/http" "net/http/httptest" "testing" @@ -31,7 +32,7 @@ func rcNewRun(t *testing.T, method string) (*fstest.Run, *rc.Call) { func TestRcAbout(t *testing.T) { r, call := rcNewRun(t, "operations/about") defer r.Finalise() - r.Mkdir(r.Fremote) + r.Mkdir(context.Background(), r.Fremote) // Will get an error if remote doesn't support About expectedErr := r.Fremote.Features().About == nil @@ -39,7 +40,7 @@ func TestRcAbout(t *testing.T) { in := rc.Params{ "fs": r.FremoteName, } - out, err := call.Fn(in) + out, err := call.Fn(context.Background(), in) if expectedErr { assert.Error(t, err) return @@ -58,7 +59,7 @@ func TestRcCleanup(t *testing.T) { in := rc.Params{ "fs": r.LocalName, } - out, err := call.Fn(in) + out, err := call.Fn(context.Background(), in) require.Error(t, err) assert.Equal(t, rc.Params(nil), out) assert.Contains(t, err.Error(), "doesn't support cleanup") @@ -69,7 +70,7 @@ func TestRcCopyfile(t *testing.T) { r, call := rcNewRun(t, "operations/copyfile") defer r.Finalise() file1 := r.WriteFile("file1", "file1 contents", t1) - r.Mkdir(r.Fremote) + r.Mkdir(context.Background(), r.Fremote) fstest.CheckItems(t, r.Flocal, file1) fstest.CheckItems(t, r.Fremote) @@ -79,7 +80,7 @@ func TestRcCopyfile(t *testing.T) { "dstFs": r.FremoteName, "dstRemote": "file1-renamed", } - out, err := call.Fn(in) + out, err := call.Fn(context.Background(), in) require.NoError(t, err) assert.Equal(t, rc.Params(nil), out) @@ -94,7 +95,7 @@ func TestRcCopyurl(t *testing.T) { defer r.Finalise() contents := "file1 contents\n" file1 := r.WriteFile("file1", contents, t1) - r.Mkdir(r.Fremote) + r.Mkdir(context.Background(), r.Fremote) fstest.CheckItems(t, r.Fremote) ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ 
-108,7 +109,7 @@ func TestRcCopyurl(t *testing.T) { "remote": "file1", "url": ts.URL, } - out, err := call.Fn(in) + out, err := call.Fn(context.Background(), in) require.NoError(t, err) assert.Equal(t, rc.Params(nil), out) @@ -120,15 +121,15 @@ func TestRcDelete(t *testing.T) { r, call := rcNewRun(t, "operations/delete") defer r.Finalise() - file1 := r.WriteObject("small", "1234567890", t2) // 10 bytes - file2 := r.WriteObject("medium", "------------------------------------------------------------", t1) // 60 bytes - file3 := r.WriteObject("large", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", t1) // 100 bytes + file1 := r.WriteObject(context.Background(), "small", "1234567890", t2) // 10 bytes + file2 := r.WriteObject(context.Background(), "medium", "------------------------------------------------------------", t1) // 60 bytes + file3 := r.WriteObject(context.Background(), "large", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", t1) // 100 bytes fstest.CheckItems(t, r.Fremote, file1, file2, file3) in := rc.Params{ "fs": r.FremoteName, } - out, err := call.Fn(in) + out, err := call.Fn(context.Background(), in) require.NoError(t, err) assert.Equal(t, rc.Params(nil), out) @@ -140,15 +141,15 @@ func TestRcDeletefile(t *testing.T) { r, call := rcNewRun(t, "operations/deletefile") defer r.Finalise() - file1 := r.WriteObject("small", "1234567890", t2) // 10 bytes - file2 := r.WriteObject("medium", "------------------------------------------------------------", t1) // 60 bytes + file1 := r.WriteObject(context.Background(), "small", "1234567890", t2) // 10 bytes + file2 := r.WriteObject(context.Background(), "medium", "------------------------------------------------------------", t1) // 60 bytes fstest.CheckItems(t, r.Fremote, file1, file2) in := rc.Params{ "fs": r.FremoteName, "remote": "small", } - out, err := call.Fn(in) + out, err := 
call.Fn(context.Background(), in) require.NoError(t, err) assert.Equal(t, rc.Params(nil), out) @@ -160,8 +161,8 @@ func TestRcList(t *testing.T) { r, call := rcNewRun(t, "operations/list") defer r.Finalise() - file1 := r.WriteObject("a", "a", t1) - file2 := r.WriteObject("subdir/b", "bb", t2) + file1 := r.WriteObject(context.Background(), "a", "a", t1) + file2 := r.WriteObject(context.Background(), "subdir/b", "bb", t2) fstest.CheckItems(t, r.Fremote, file1, file2) @@ -169,7 +170,7 @@ func TestRcList(t *testing.T) { "fs": r.FremoteName, "remote": "", } - out, err := call.Fn(in) + out, err := call.Fn(context.Background(), in) require.NoError(t, err) list := out["list"].([]*operations.ListJSONItem) @@ -201,7 +202,7 @@ func TestRcList(t *testing.T) { "recurse": true, }, } - out, err = call.Fn(in) + out, err = call.Fn(context.Background(), in) require.NoError(t, err) list = out["list"].([]*operations.ListJSONItem) @@ -224,7 +225,7 @@ func TestRcList(t *testing.T) { func TestRcMkdir(t *testing.T) { r, call := rcNewRun(t, "operations/mkdir") defer r.Finalise() - r.Mkdir(r.Fremote) + r.Mkdir(context.Background(), r.Fremote) fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{}, []string{}, fs.GetModifyWindow(r.Fremote)) @@ -232,7 +233,7 @@ func TestRcMkdir(t *testing.T) { "fs": r.FremoteName, "remote": "subdir", } - out, err := call.Fn(in) + out, err := call.Fn(context.Background(), in) require.NoError(t, err) assert.Equal(t, rc.Params(nil), out) @@ -244,7 +245,7 @@ func TestRcMovefile(t *testing.T) { r, call := rcNewRun(t, "operations/movefile") defer r.Finalise() file1 := r.WriteFile("file1", "file1 contents", t1) - r.Mkdir(r.Fremote) + r.Mkdir(context.Background(), r.Fremote) fstest.CheckItems(t, r.Flocal, file1) fstest.CheckItems(t, r.Fremote) @@ -254,7 +255,7 @@ func TestRcMovefile(t *testing.T) { "dstFs": r.FremoteName, "dstRemote": "file1-renamed", } - out, err := call.Fn(in) + out, err := call.Fn(context.Background(), in) require.NoError(t, err) 
assert.Equal(t, rc.Params(nil), out) @@ -267,7 +268,7 @@ func TestRcMovefile(t *testing.T) { func TestRcPurge(t *testing.T) { r, call := rcNewRun(t, "operations/purge") defer r.Finalise() - file1 := r.WriteObject("subdir/file1", "subdir/file1 contents", t1) + file1 := r.WriteObject(context.Background(), "subdir/file1", "subdir/file1 contents", t1) fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{file1}, []string{"subdir"}, fs.GetModifyWindow(r.Fremote)) @@ -275,7 +276,7 @@ func TestRcPurge(t *testing.T) { "fs": r.FremoteName, "remote": "subdir", } - out, err := call.Fn(in) + out, err := call.Fn(context.Background(), in) require.NoError(t, err) assert.Equal(t, rc.Params(nil), out) @@ -286,8 +287,8 @@ func TestRcPurge(t *testing.T) { func TestRcRmdir(t *testing.T) { r, call := rcNewRun(t, "operations/rmdir") defer r.Finalise() - r.Mkdir(r.Fremote) - assert.NoError(t, r.Fremote.Mkdir("subdir")) + r.Mkdir(context.Background(), r.Fremote) + assert.NoError(t, r.Fremote.Mkdir(context.Background(), "subdir")) fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{}, []string{"subdir"}, fs.GetModifyWindow(r.Fremote)) @@ -295,7 +296,7 @@ func TestRcRmdir(t *testing.T) { "fs": r.FremoteName, "remote": "subdir", } - out, err := call.Fn(in) + out, err := call.Fn(context.Background(), in) require.NoError(t, err) assert.Equal(t, rc.Params(nil), out) @@ -306,9 +307,9 @@ func TestRcRmdir(t *testing.T) { func TestRcRmdirs(t *testing.T) { r, call := rcNewRun(t, "operations/rmdirs") defer r.Finalise() - r.Mkdir(r.Fremote) - assert.NoError(t, r.Fremote.Mkdir("subdir")) - assert.NoError(t, r.Fremote.Mkdir("subdir/subsubdir")) + r.Mkdir(context.Background(), r.Fremote) + assert.NoError(t, r.Fremote.Mkdir(context.Background(), "subdir")) + assert.NoError(t, r.Fremote.Mkdir(context.Background(), "subdir/subsubdir")) fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{}, []string{"subdir", "subdir/subsubdir"}, fs.GetModifyWindow(r.Fremote)) @@ -316,21 +317,21 @@ 
func TestRcRmdirs(t *testing.T) { "fs": r.FremoteName, "remote": "subdir", } - out, err := call.Fn(in) + out, err := call.Fn(context.Background(), in) require.NoError(t, err) assert.Equal(t, rc.Params(nil), out) fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{}, []string{}, fs.GetModifyWindow(r.Fremote)) - assert.NoError(t, r.Fremote.Mkdir("subdir")) - assert.NoError(t, r.Fremote.Mkdir("subdir/subsubdir")) + assert.NoError(t, r.Fremote.Mkdir(context.Background(), "subdir")) + assert.NoError(t, r.Fremote.Mkdir(context.Background(), "subdir/subsubdir")) in = rc.Params{ "fs": r.FremoteName, "remote": "subdir", "leaveRoot": true, } - out, err = call.Fn(in) + out, err = call.Fn(context.Background(), in) require.NoError(t, err) assert.Equal(t, rc.Params(nil), out) @@ -342,15 +343,15 @@ func TestRcRmdirs(t *testing.T) { func TestRcSize(t *testing.T) { r, call := rcNewRun(t, "operations/size") defer r.Finalise() - file1 := r.WriteObject("small", "1234567890", t2) // 10 bytes - file2 := r.WriteObject("subdir/medium", "------------------------------------------------------------", t1) // 60 bytes - file3 := r.WriteObject("subdir/subsubdir/large", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", t1) // 50 bytes + file1 := r.WriteObject(context.Background(), "small", "1234567890", t2) // 10 bytes + file2 := r.WriteObject(context.Background(), "subdir/medium", "------------------------------------------------------------", t1) // 60 bytes + file3 := r.WriteObject(context.Background(), "subdir/subsubdir/large", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", t1) // 50 bytes fstest.CheckItems(t, r.Fremote, file1, file2, file3) in := rc.Params{ "fs": r.FremoteName, } - out, err := call.Fn(in) + out, err := call.Fn(context.Background(), in) require.NoError(t, err) assert.Equal(t, rc.Params{ "count": int64(3), @@ -366,7 +367,7 @@ func TestRcPublicLink(t *testing.T) { "fs": r.FremoteName, "remote": "", } - _, err := call.Fn(in) + _, err := 
call.Fn(context.Background(), in) require.Error(t, err) assert.Contains(t, err.Error(), "doesn't support public links") } @@ -378,7 +379,7 @@ func TestRcFsInfo(t *testing.T) { in := rc.Params{ "fs": r.FremoteName, } - got, err := call.Fn(in) + got, err := call.Fn(context.Background(), in) require.NoError(t, err) want := operations.GetFsInfo(r.Fremote) assert.Equal(t, want.Name, got["Name"]) diff --git a/fs/operations/reopen.go b/fs/operations/reopen.go index f51e3d3d6..85b935358 100644 --- a/fs/operations/reopen.go +++ b/fs/operations/reopen.go @@ -1,6 +1,7 @@ package operations import ( + "context" "io" "sync" @@ -10,6 +11,7 @@ import ( // reOpen is a wrapper for an object reader which reopens the stream on error type reOpen struct { + ctx context.Context mu sync.Mutex // mutex to protect the below src fs.Object // object to open hashOption *fs.HashesOption // option to pass to initial open @@ -33,8 +35,9 @@ var ( // // If rangeOption is set then this will applied when reading from the // start, and updated on retries. -func newReOpen(src fs.Object, hashOption *fs.HashesOption, rangeOption *fs.RangeOption, maxTries int) (rc io.ReadCloser, err error) { +func newReOpen(ctx context.Context, src fs.Object, hashOption *fs.HashesOption, rangeOption *fs.RangeOption, maxTries int) (rc io.ReadCloser, err error) { h := &reOpen{ + ctx: ctx, src: src, hashOption: hashOption, rangeOption: rangeOption, @@ -76,7 +79,7 @@ func (h *reOpen) open() error { if h.tries > h.maxTries { h.err = errorTooManyTries } else { - h.rc, h.err = h.src.Open(opts...) + h.rc, h.err = h.src.Open(h.ctx, opts...) } if h.err != nil { if h.tries > 1 { diff --git a/fs/operations/reopen_test.go b/fs/operations/reopen_test.go index fd4c3af16..433553fc6 100644 --- a/fs/operations/reopen_test.go +++ b/fs/operations/reopen_test.go @@ -1,6 +1,7 @@ package operations import ( + "context" "io" "io/ioutil" "testing" @@ -29,8 +30,8 @@ type reOpenTestObject struct { // Open opens the file for read. 
Call Close() on the returned io.ReadCloser // // This will break after reading the number of bytes in breaks -func (o *reOpenTestObject) Open(options ...fs.OpenOption) (io.ReadCloser, error) { - rc, err := o.Object.Open(options...) +func (o *reOpenTestObject) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) { + rc, err := o.Object.Open(ctx, options...) if err != nil { return nil, err } @@ -82,7 +83,7 @@ func TestReOpen(t *testing.T) { breaks: breaks, } hashOption := &fs.HashesOption{Hashes: hash.NewHashSet(hash.MD5)} - return newReOpen(src, hashOption, rangeOption, maxRetries) + return newReOpen(context.Background(), src, hashOption, rangeOption, maxRetries) } t.Run("Basics", func(t *testing.T) { diff --git a/fs/rc/config.go b/fs/rc/config.go index 6e068c10d..eb4667444 100644 --- a/fs/rc/config.go +++ b/fs/rc/config.go @@ -5,6 +5,8 @@ package rc import ( + "context" + "github.com/pkg/errors" ) @@ -36,7 +38,7 @@ func init() { } // Show the list of all the option blocks -func rcOptionsBlocks(in Params) (out Params, err error) { +func rcOptionsBlocks(ctx context.Context, in Params) (out Params, err error) { options := []string{} for name := range optionBlock { options = append(options, name) @@ -61,7 +63,7 @@ map to the external options very easily with a few exceptions. 
} // Show the list of all the option blocks -func rcOptionsGet(in Params) (out Params, err error) { +func rcOptionsGet(ctx context.Context, in Params) (out Params, err error) { out = make(Params) for name, options := range optionBlock { out[name] = options @@ -103,7 +105,7 @@ And this sets NOTICE level logs (normal without -v) } // Set an option in an option block -func rcOptionsSet(in Params) (out Params, err error) { +func rcOptionsSet(ctx context.Context, in Params) (out Params, err error) { for name, options := range in { current := optionBlock[name] if current == nil { diff --git a/fs/rc/config_test.go b/fs/rc/config_test.go index 55800d58c..3cc1b3f5a 100644 --- a/fs/rc/config_test.go +++ b/fs/rc/config_test.go @@ -1,6 +1,7 @@ package rc import ( + "context" "fmt" "testing" @@ -47,7 +48,7 @@ func TestOptionsBlocks(t *testing.T) { call := Calls.Get("options/blocks") require.NotNil(t, call) in := Params{} - out, err := call.Fn(in) + out, err := call.Fn(context.Background(), in) require.NoError(t, err) require.NotNil(t, out) assert.Equal(t, Params{"options": []string{"potato"}}, out) @@ -59,7 +60,7 @@ func TestOptionsGet(t *testing.T) { call := Calls.Get("options/get") require.NotNil(t, call) in := Params{} - out, err := call.Fn(in) + out, err := call.Fn(context.Background(), in) require.NoError(t, err) require.NotNil(t, out) assert.Equal(t, Params{"potato": &testOptions}, out) @@ -83,7 +84,7 @@ func TestOptionsSet(t *testing.T) { "Int": 50, }, } - out, err := call.Fn(in) + out, err := call.Fn(context.Background(), in) require.NoError(t, err) require.Nil(t, out) assert.Equal(t, 50, testOptions.Int) @@ -91,7 +92,7 @@ func TestOptionsSet(t *testing.T) { assert.Equal(t, 1, reloaded) // error from reload - _, err = call.Fn(in) + _, err = call.Fn(context.Background(), in) require.Error(t, err) assert.Contains(t, err.Error(), "error while reloading") @@ -101,7 +102,7 @@ func TestOptionsSet(t *testing.T) { "Int": 50, }, } - _, err = call.Fn(in) + _, err = 
call.Fn(context.Background(), in) require.Error(t, err) assert.Contains(t, err.Error(), "unknown option block") @@ -109,7 +110,7 @@ func TestOptionsSet(t *testing.T) { in = Params{ "potato": []string{"a", "b"}, } - _, err = call.Fn(in) + _, err = call.Fn(context.Background(), in) require.Error(t, err) assert.Contains(t, err.Error(), "failed to write options") diff --git a/fs/rc/internal.go b/fs/rc/internal.go index a161f2ab3..0efdf94b2 100644 --- a/fs/rc/internal.go +++ b/fs/rc/internal.go @@ -3,6 +3,7 @@ package rc import ( + "context" "os" "runtime" @@ -35,7 +36,7 @@ check that parameter passing is working properly.`, } // Echo the input to the output parameters -func rcNoop(in Params) (out Params, err error) { +func rcNoop(ctx context.Context, in Params) (out Params, err error) { return in, nil } @@ -51,7 +52,7 @@ Useful for testing error handling.`, } // Return an error regardless -func rcError(in Params) (out Params, err error) { +func rcError(ctx context.Context, in Params) (out Params, err error) { return nil, errors.Errorf("arbitrary error on input %+v", in) } @@ -67,7 +68,7 @@ the commands response.`, } // List the registered commands -func rcList(in Params) (out Params, err error) { +func rcList(ctx context.Context, in Params) (out Params, err error) { out = make(Params) out["commands"] = Calls.List() return out, nil @@ -85,7 +86,7 @@ Useful for stopping rclone process.`, } // Return PID of current process -func rcPid(in Params) (out Params, err error) { +func rcPid(ctx context.Context, in Params) (out Params, err error) { out = make(Params) out["pid"] = os.Getpid() return out, nil @@ -111,7 +112,7 @@ The most interesting values for most people are: } // Return the memory statistics -func rcMemStats(in Params) (out Params, err error) { +func rcMemStats(ctx context.Context, in Params) (out Params, err error) { out = make(Params) var m runtime.MemStats runtime.ReadMemStats(&m) @@ -152,7 +153,7 @@ memory problems. 
} // Do a garbage collection run -func rcGc(in Params) (out Params, err error) { +func rcGc(ctx context.Context, in Params) (out Params, err error) { runtime.GC() return nil, nil } @@ -177,7 +178,7 @@ This shows the current version of go and the go runtime } // Return version info -func rcVersion(in Params) (out Params, err error) { +func rcVersion(ctx context.Context, in Params) (out Params, err error) { decomposed, err := version.New(fs.Version) if err != nil { return nil, err @@ -209,7 +210,7 @@ Returns } // Return obscured string -func rcObscure(in Params) (out Params, err error) { +func rcObscure(ctx context.Context, in Params) (out Params, err error) { clear, err := in.GetString("clear") if err != nil { return nil, err diff --git a/fs/rc/internal_test.go b/fs/rc/internal_test.go index 39789bdfd..64932f550 100644 --- a/fs/rc/internal_test.go +++ b/fs/rc/internal_test.go @@ -1,6 +1,7 @@ package rc import ( + "context" "runtime" "testing" @@ -18,7 +19,7 @@ func TestInternalNoop(t *testing.T) { "String": "hello", "Int": 42, } - out, err := call.Fn(in) + out, err := call.Fn(context.Background(), in) require.NoError(t, err) require.NotNil(t, out) assert.Equal(t, in, out) @@ -28,7 +29,7 @@ func TestInternalError(t *testing.T) { call := Calls.Get("rc/error") assert.NotNil(t, call) in := Params{} - out, err := call.Fn(in) + out, err := call.Fn(context.Background(), in) require.Error(t, err) require.Nil(t, out) } @@ -37,7 +38,7 @@ func TestInternalList(t *testing.T) { call := Calls.Get("rc/list") assert.NotNil(t, call) in := Params{} - out, err := call.Fn(in) + out, err := call.Fn(context.Background(), in) require.NoError(t, err) require.NotNil(t, out) assert.Equal(t, Params{"commands": Calls.List()}, out) @@ -47,7 +48,7 @@ func TestCorePid(t *testing.T) { call := Calls.Get("core/pid") assert.NotNil(t, call) in := Params{} - out, err := call.Fn(in) + out, err := call.Fn(context.Background(), in) require.NoError(t, err) require.NotNil(t, out) pid := out["pid"] @@ -60,7 
+61,7 @@ func TestCoreMemstats(t *testing.T) { call := Calls.Get("core/memstats") assert.NotNil(t, call) in := Params{} - out, err := call.Fn(in) + out, err := call.Fn(context.Background(), in) require.NoError(t, err) require.NotNil(t, out) sys := out["Sys"] @@ -73,7 +74,7 @@ func TestCoreGC(t *testing.T) { call := Calls.Get("core/gc") assert.NotNil(t, call) in := Params{} - out, err := call.Fn(in) + out, err := call.Fn(context.Background(), in) require.NoError(t, err) require.Nil(t, out) assert.Equal(t, Params(nil), out) @@ -83,7 +84,7 @@ func TestCoreVersion(t *testing.T) { call := Calls.Get("core/version") assert.NotNil(t, call) in := Params{} - out, err := call.Fn(in) + out, err := call.Fn(context.Background(), in) require.NoError(t, err) require.NotNil(t, out) assert.Equal(t, fs.Version, out["version"]) @@ -101,7 +102,7 @@ func TestCoreObscure(t *testing.T) { in := Params{ "clear": "potato", } - out, err := call.Fn(in) + out, err := call.Fn(context.Background(), in) require.NoError(t, err) require.NotNil(t, out) assert.Equal(t, in["clear"], obscure.MustReveal(out["obscured"].(string))) diff --git a/fs/rc/job.go b/fs/rc/job.go index 580a67c2d..42ec47dbe 100644 --- a/fs/rc/job.go +++ b/fs/rc/job.go @@ -3,6 +3,7 @@ package rc import ( + "context" "sync" "sync/atomic" "time" @@ -14,14 +15,15 @@ import ( // Job describes a asynchronous task started via the rc package type Job struct { mu sync.Mutex - ID int64 `json:"id"` - StartTime time.Time `json:"startTime"` - EndTime time.Time `json:"endTime"` - Error string `json:"error"` - Finished bool `json:"finished"` - Success bool `json:"success"` - Duration float64 `json:"duration"` - Output Params `json:"output"` + ID int64 `json:"id"` + StartTime time.Time `json:"startTime"` + EndTime time.Time `json:"endTime"` + Error string `json:"error"` + Finished bool `json:"finished"` + Success bool `json:"success"` + Duration float64 `json:"duration"` + Output Params `json:"output"` + Context context.Context `json:"-"` } // 
Jobs describes a collection of running tasks @@ -121,7 +123,7 @@ func (job *Job) run(fn Func, in Params) { job.finish(nil, errors.Errorf("panic received: %v", r)) } }() - job.finish(fn(in)) + job.finish(fn(job.Context, in)) } // NewJob start a new Job off @@ -129,6 +131,7 @@ func (jobs *Jobs) NewJob(fn Func, in Params) *Job { job := &Job{ ID: atomic.AddInt64(&jobID, 1), StartTime: time.Now(), + Context: context.Background(), } go job.run(fn, in) jobs.mu.Lock() @@ -164,12 +167,13 @@ Results - startTime - time the job started (eg "2018-10-26T18:50:20.528336039+01:00") - success - boolean - true for success false otherwise - output - output of the job as would have been returned if called synchronously +- progress - output of the progress related to the underlying job `, }) } // Returns the status of a job -func rcJobStatus(in Params) (out Params, err error) { +func rcJobStatus(ctx context.Context, in Params) (out Params, err error) { jobID, err := in.GetInt64("jobid") if err != nil { return nil, err @@ -202,7 +206,7 @@ Results } // Returns the status of a job -func rcJobList(in Params) (out Params, err error) { +func rcJobList(ctx context.Context, in Params) (out Params, err error) { out = make(Params) out["jobids"] = running.IDs() return out, nil diff --git a/fs/rc/job_test.go b/fs/rc/job_test.go index 805f6dee4..75eb6e240 100644 --- a/fs/rc/job_test.go +++ b/fs/rc/job_test.go @@ -1,6 +1,7 @@ package rc import ( + "context" "runtime" "testing" "time" @@ -35,7 +36,7 @@ func TestJobsExpire(t *testing.T) { jobs := newJobs() jobs.expireInterval = time.Millisecond assert.Equal(t, false, jobs.expireRunning) - job := jobs.NewJob(func(in Params) (Params, error) { + job := jobs.NewJob(func(ctx context.Context, in Params) (Params, error) { defer close(wait) return in, nil }, Params{}) @@ -56,7 +57,7 @@ func TestJobsExpire(t *testing.T) { jobs.mu.Unlock() } -var noopFn = func(in Params) (Params, error) { +var noopFn = func(ctx context.Context, in Params) (Params, error) { 
return nil, nil } @@ -80,7 +81,8 @@ func TestJobsGet(t *testing.T) { assert.Nil(t, jobs.Get(123123123123)) } -var longFn = func(in Params) (Params, error) { +var longFn = func(ctx context.Context, in Params) (Params, error) { + // TODO get execution time from context? time.Sleep(1 * time.Hour) return nil, nil } @@ -144,7 +146,7 @@ func TestJobFinish(t *testing.T) { // part of NewJob, now just test the panic catching func TestJobRunPanic(t *testing.T) { wait := make(chan struct{}) - boom := func(in Params) (Params, error) { + boom := func(ctx context.Context, in Params) (Params, error) { sleepJob() defer close(wait) panic("boom") @@ -200,7 +202,7 @@ func TestRcJobStatus(t *testing.T) { call := Calls.Get("job/status") assert.NotNil(t, call) in := Params{"jobid": 1} - out, err := call.Fn(in) + out, err := call.Fn(context.Background(), in) require.NoError(t, err) require.NotNil(t, out) assert.Equal(t, float64(1), out["id"]) @@ -209,12 +211,12 @@ func TestRcJobStatus(t *testing.T) { assert.Equal(t, false, out["success"]) in = Params{"jobid": 123123123} - _, err = call.Fn(in) + _, err = call.Fn(context.Background(), in) require.Error(t, err) assert.Contains(t, err.Error(), "job not found") in = Params{"jobidx": 123123123} - _, err = call.Fn(in) + _, err = call.Fn(context.Background(), in) require.Error(t, err) assert.Contains(t, err.Error(), "Didn't find key") } @@ -227,7 +229,7 @@ func TestRcJobList(t *testing.T) { call := Calls.Get("job/list") assert.NotNil(t, call) in := Params{} - out, err := call.Fn(in) + out, err := call.Fn(context.Background(), in) require.NoError(t, err) require.NotNil(t, out) assert.Equal(t, Params{"jobids": []int64{1}}, out) diff --git a/fs/rc/rcserver/rcserver.go b/fs/rc/rcserver/rcserver.go index 38f3e80ac..c51a1fc75 100644 --- a/fs/rc/rcserver/rcserver.go +++ b/fs/rc/rcserver/rcserver.go @@ -187,7 +187,7 @@ func (s *Server) handlePost(w http.ResponseWriter, r *http.Request, path string) if isAsync { out, err = rc.StartJob(call.Fn, in) } else 
{ - out, err = call.Fn(in) + out, err = call.Fn(r.Context(), in) } if err != nil { writeError(path, in, w, err, http.StatusInternalServerError) @@ -230,7 +230,7 @@ func (s *Server) serveRemote(w http.ResponseWriter, r *http.Request, path string } if path == "" || strings.HasSuffix(path, "/") { path = strings.Trim(path, "/") - entries, err := list.DirSorted(f, false, path) + entries, err := list.DirSorted(r.Context(), f, false, path) if err != nil { writeError(path, nil, w, errors.Wrap(err, "failed to list directory"), http.StatusInternalServerError) return @@ -244,7 +244,7 @@ func (s *Server) serveRemote(w http.ResponseWriter, r *http.Request, path string directory.Serve(w, r) } else { path = strings.Trim(path, "/") - o, err := f.NewObject(path) + o, err := f.NewObject(r.Context(), path) if err != nil { writeError(path, nil, w, errors.Wrap(err, "failed to find object"), http.StatusInternalServerError) return diff --git a/fs/rc/registry.go b/fs/rc/registry.go index 709540c59..34996df17 100644 --- a/fs/rc/registry.go +++ b/fs/rc/registry.go @@ -3,6 +3,7 @@ package rc import ( + "context" "sort" "strings" "sync" @@ -11,7 +12,7 @@ import ( ) // Func defines a type for a remote control function -type Func func(in Params) (out Params, err error) +type Func func(ctx context.Context, in Params) (out Params, err error) // Call defines info about a remote control function and is used in // the Add function to create new entry points. 
diff --git a/fs/sync/rc.go b/fs/sync/rc.go index 8bbf1a11b..eb90e323a 100644 --- a/fs/sync/rc.go +++ b/fs/sync/rc.go @@ -1,6 +1,8 @@ package sync import ( + "context" + "github.com/ncw/rclone/fs/rc" ) @@ -14,8 +16,8 @@ func init() { rc.Add(rc.Call{ Path: "sync/" + name, AuthRequired: true, - Fn: func(in rc.Params) (rc.Params, error) { - return rcSyncCopyMove(in, name) + Fn: func(ctx context.Context, in rc.Params) (rc.Params, error) { + return rcSyncCopyMove(ctx, in, name) }, Title: name + " a directory from source remote to destination remote", Help: `This takes the following parameters @@ -30,7 +32,7 @@ See the [` + name + ` command](/commands/rclone_` + name + `/) command for more } // Sync/Copy/Move a file -func rcSyncCopyMove(in rc.Params, name string) (out rc.Params, err error) { +func rcSyncCopyMove(ctx context.Context, in rc.Params, name string) (out rc.Params, err error) { srcFs, err := rc.GetFsNamed(in, "srcFs") if err != nil { return nil, err @@ -45,15 +47,15 @@ func rcSyncCopyMove(in rc.Params, name string) (out rc.Params, err error) { } switch name { case "sync": - return nil, Sync(dstFs, srcFs, createEmptySrcDirs) + return nil, Sync(ctx, dstFs, srcFs, createEmptySrcDirs) case "copy": - return nil, CopyDir(dstFs, srcFs, createEmptySrcDirs) + return nil, CopyDir(ctx, dstFs, srcFs, createEmptySrcDirs) case "move": deleteEmptySrcDirs, err := in.GetBool("deleteEmptySrcDirs") if rc.NotErrParamNotFound(err) { return nil, err } - return nil, MoveDir(dstFs, srcFs, deleteEmptySrcDirs, createEmptySrcDirs) + return nil, MoveDir(ctx, dstFs, srcFs, deleteEmptySrcDirs, createEmptySrcDirs) } panic("unknown rcSyncCopyMove type") } diff --git a/fs/sync/rc_test.go b/fs/sync/rc_test.go index 852cff5f2..529fa4964 100644 --- a/fs/sync/rc_test.go +++ b/fs/sync/rc_test.go @@ -1,6 +1,7 @@ package sync import ( + "context" "testing" "github.com/ncw/rclone/fs/cache" @@ -26,11 +27,11 @@ func rcNewRun(t *testing.T, method string) (*fstest.Run, *rc.Call) { func TestRcCopy(t 
*testing.T) { r, call := rcNewRun(t, "sync/copy") defer r.Finalise() - r.Mkdir(r.Fremote) + r.Mkdir(context.Background(), r.Fremote) - file1 := r.WriteBoth("file1", "file1 contents", t1) + file1 := r.WriteBoth(context.Background(), "file1", "file1 contents", t1) file2 := r.WriteFile("subdir/file2", "file2 contents", t2) - file3 := r.WriteObject("subdir/subsubdir/file3", "file3 contents", t3) + file3 := r.WriteObject(context.Background(), "subdir/subsubdir/file3", "file3 contents", t3) fstest.CheckItems(t, r.Flocal, file1, file2) fstest.CheckItems(t, r.Fremote, file1, file3) @@ -39,7 +40,7 @@ func TestRcCopy(t *testing.T) { "srcFs": r.LocalName, "dstFs": r.FremoteName, } - out, err := call.Fn(in) + out, err := call.Fn(context.Background(), in) require.NoError(t, err) assert.Equal(t, rc.Params(nil), out) @@ -51,11 +52,11 @@ func TestRcCopy(t *testing.T) { func TestRcMove(t *testing.T) { r, call := rcNewRun(t, "sync/move") defer r.Finalise() - r.Mkdir(r.Fremote) + r.Mkdir(context.Background(), r.Fremote) - file1 := r.WriteBoth("file1", "file1 contents", t1) + file1 := r.WriteBoth(context.Background(), "file1", "file1 contents", t1) file2 := r.WriteFile("subdir/file2", "file2 contents", t2) - file3 := r.WriteObject("subdir/subsubdir/file3", "file3 contents", t3) + file3 := r.WriteObject(context.Background(), "subdir/subsubdir/file3", "file3 contents", t3) fstest.CheckItems(t, r.Flocal, file1, file2) fstest.CheckItems(t, r.Fremote, file1, file3) @@ -64,7 +65,7 @@ func TestRcMove(t *testing.T) { "srcFs": r.LocalName, "dstFs": r.FremoteName, } - out, err := call.Fn(in) + out, err := call.Fn(context.Background(), in) require.NoError(t, err) assert.Equal(t, rc.Params(nil), out) @@ -76,11 +77,11 @@ func TestRcMove(t *testing.T) { func TestRcSync(t *testing.T) { r, call := rcNewRun(t, "sync/sync") defer r.Finalise() - r.Mkdir(r.Fremote) + r.Mkdir(context.Background(), r.Fremote) - file1 := r.WriteBoth("file1", "file1 contents", t1) + file1 := r.WriteBoth(context.Background(), 
"file1", "file1 contents", t1) file2 := r.WriteFile("subdir/file2", "file2 contents", t2) - file3 := r.WriteObject("subdir/subsubdir/file3", "file3 contents", t3) + file3 := r.WriteObject(context.Background(), "subdir/subsubdir/file3", "file3 contents", t3) fstest.CheckItems(t, r.Flocal, file1, file2) fstest.CheckItems(t, r.Fremote, file1, file3) @@ -89,7 +90,7 @@ func TestRcSync(t *testing.T) { "srcFs": r.LocalName, "dstFs": r.FremoteName, } - out, err := call.Fn(in) + out, err := call.Fn(context.Background(), in) require.NoError(t, err) assert.Equal(t, rc.Params(nil), out) diff --git a/fs/sync/sync.go b/fs/sync/sync.go index 2970efa37..c2ce37ae0 100644 --- a/fs/sync/sync.go +++ b/fs/sync/sync.go @@ -65,7 +65,7 @@ type syncCopyMove struct { suffix string // suffix to add to files placed in backupDir } -func newSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) (*syncCopyMove, error) { +func newSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) (*syncCopyMove, error) { if (deleteMode != fs.DeleteModeOff || DoMove) && operations.Overlapping(fdst, fsrc) { return nil, fserrors.FatalError(fs.ErrorOverlapping) } @@ -91,7 +91,7 @@ func newSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, de toBeRenamed: newPipe(accounting.Stats.SetRenameQueue, fs.Config.MaxBacklog), trackRenamesCh: make(chan fs.Object, fs.Config.Checkers), } - s.ctx, s.cancel = context.WithCancel(context.Background()) + s.ctx, s.cancel = context.WithCancel(ctx) if s.noTraverse && s.deleteMode != fs.DeleteModeOff { fs.Errorf(nil, "Ignoring --no-traverse with sync") s.noTraverse = false @@ -219,7 +219,7 @@ func (s *syncCopyMove) pairChecker(in *pipe, out *pipe, wg *sync.WaitGroup) { accounting.Stats.Checking(src.Remote()) // Check to see if can store this if src.Storable() { - if operations.NeedTransfer(pair.Dst, pair.Src) { + if 
operations.NeedTransfer(s.ctx, pair.Dst, pair.Src) { // If files are treated as immutable, fail if destination exists and does not match if fs.Config.Immutable && pair.Dst != nil { fs.Errorf(pair.Dst, "Source and destination exist but do not match: immutable file modified") @@ -228,8 +228,8 @@ func (s *syncCopyMove) pairChecker(in *pipe, out *pipe, wg *sync.WaitGroup) { // If destination already exists, then we must move it into --backup-dir if required if pair.Dst != nil && s.backupDir != nil { remoteWithSuffix := operations.SuffixName(pair.Dst.Remote()) - overwritten, _ := s.backupDir.NewObject(remoteWithSuffix) - _, err := operations.Move(s.backupDir, overwritten, remoteWithSuffix, pair.Dst) + overwritten, _ := s.backupDir.NewObject(s.ctx, remoteWithSuffix) + _, err := operations.Move(s.ctx, s.backupDir, overwritten, remoteWithSuffix, pair.Dst) if err != nil { s.processError(err) } else { @@ -251,7 +251,7 @@ func (s *syncCopyMove) pairChecker(in *pipe, out *pipe, wg *sync.WaitGroup) { // If moving need to delete the files we don't need to copy if s.DoMove { // Delete src if no error on copy - s.processError(operations.DeleteFile(src)) + s.processError(operations.DeleteFile(s.ctx, src)) } } } @@ -280,7 +280,7 @@ func (s *syncCopyMove) pairRenamer(in *pipe, out *pipe, wg *sync.WaitGroup) { } // pairCopyOrMove reads Objects on in and moves or copies them. 
-func (s *syncCopyMove) pairCopyOrMove(in *pipe, fdst fs.Fs, wg *sync.WaitGroup) { +func (s *syncCopyMove) pairCopyOrMove(ctx context.Context, in *pipe, fdst fs.Fs, wg *sync.WaitGroup) { defer wg.Done() var err error for { @@ -290,9 +290,9 @@ func (s *syncCopyMove) pairCopyOrMove(in *pipe, fdst fs.Fs, wg *sync.WaitGroup) } src := pair.Src if s.DoMove { - _, err = operations.Move(fdst, pair.Dst, src.Remote(), src) + _, err = operations.Move(ctx, fdst, pair.Dst, src.Remote(), src) } else { - _, err = operations.Copy(fdst, pair.Dst, src.Remote(), src) + _, err = operations.Copy(ctx, fdst, pair.Dst, src.Remote(), src) } s.processError(err) } @@ -317,7 +317,7 @@ func (s *syncCopyMove) stopCheckers() { func (s *syncCopyMove) startTransfers() { s.transfersWg.Add(fs.Config.Transfers) for i := 0; i < fs.Config.Transfers; i++ { - go s.pairCopyOrMove(s.toBeUploaded, s.fdst, &s.transfersWg) + go s.pairCopyOrMove(s.ctx, s.toBeUploaded, s.fdst, &s.transfersWg) } } @@ -380,7 +380,7 @@ func (s *syncCopyMove) startDeleters() { s.deletersWg.Add(1) go func() { defer s.deletersWg.Done() - err := operations.DeleteFilesWithBackupDir(s.deleteFilesCh, s.backupDir) + err := operations.DeleteFilesWithBackupDir(s.ctx, s.deleteFilesCh, s.backupDir) s.processError(err) }() } @@ -427,12 +427,12 @@ func (s *syncCopyMove) deleteFiles(checkSrcMap bool) error { } close(toDelete) }() - return operations.DeleteFilesWithBackupDir(toDelete, s.backupDir) + return operations.DeleteFilesWithBackupDir(s.ctx, toDelete, s.backupDir) } // This deletes the empty directories in the slice passed in. 
It // ignores any errors deleting directories -func deleteEmptyDirectories(f fs.Fs, entriesMap map[string]fs.DirEntry) error { +func deleteEmptyDirectories(ctx context.Context, f fs.Fs, entriesMap map[string]fs.DirEntry) error { if len(entriesMap) == 0 { return nil } @@ -454,7 +454,7 @@ func deleteEmptyDirectories(f fs.Fs, entriesMap map[string]fs.DirEntry) error { dir, ok := entry.(fs.Directory) if ok { // TryRmdir only deletes empty directories - err := operations.TryRmdir(f, dir.Remote()) + err := operations.TryRmdir(ctx, f, dir.Remote()) if err != nil { fs.Debugf(fs.LogDirName(f, dir.Remote()), "Failed to Rmdir: %v", err) errorCount++ @@ -476,7 +476,7 @@ func deleteEmptyDirectories(f fs.Fs, entriesMap map[string]fs.DirEntry) error { // This copies the empty directories in the slice passed in and logs // any errors copying the directories -func copyEmptyDirectories(f fs.Fs, entries map[string]fs.DirEntry) error { +func copyEmptyDirectories(ctx context.Context, f fs.Fs, entries map[string]fs.DirEntry) error { if len(entries) == 0 { return nil } @@ -485,7 +485,7 @@ func copyEmptyDirectories(f fs.Fs, entries map[string]fs.DirEntry) error { for _, entry := range entries { dir, ok := entry.(fs.Directory) if ok { - err := operations.Mkdir(f, dir.Remote()) + err := operations.Mkdir(ctx, f, dir.Remote()) if err != nil { fs.Errorf(fs.LogDirName(f, dir.Remote()), "Failed to Mkdir: %v", err) } else { @@ -526,7 +526,7 @@ func (s *syncCopyMove) srcParentDirCheck(entry fs.DirEntry) { // it may return an empty string in which case no hash could be made func (s *syncCopyMove) renameHash(obj fs.Object) (hash string) { var err error - hash, err = obj.Hash(s.commonHash) + hash, err = obj.Hash(s.ctx, s.commonHash) if err != nil { fs.Debugf(obj, "Hash failed: %v", err) return "" @@ -616,10 +616,10 @@ func (s *syncCopyMove) tryRename(src fs.Object) bool { } // Find dst object we are about to overwrite if it exists - dstOverwritten, _ := s.fdst.NewObject(src.Remote()) + 
dstOverwritten, _ := s.fdst.NewObject(s.ctx, src.Remote()) // Rename dst to have name src.Remote() - _, err := operations.Move(s.fdst, dstOverwritten, src.Remote(), dst) + _, err := operations.Move(s.ctx, s.fdst, dstOverwritten, src.Remote(), dst) if err != nil { fs.Debugf(src, "Failed to rename to %q: %v", dst.Remote(), err) return false @@ -688,7 +688,7 @@ func (s *syncCopyMove) run() error { s.stopDeleters() if s.copyEmptySrcDirs { - s.processError(copyEmptyDirectories(s.fdst, s.srcEmptyDirs)) + s.processError(copyEmptyDirectories(s.ctx, s.fdst, s.srcEmptyDirs)) } // Delete files after @@ -705,7 +705,7 @@ func (s *syncCopyMove) run() error { if s.currentError() != nil && !fs.Config.IgnoreErrors { fs.Errorf(s.fdst, "%v", fs.ErrorNotDeletingDirs) } else { - s.processError(deleteEmptyDirectories(s.fdst, s.dstEmptyDirs)) + s.processError(deleteEmptyDirectories(s.ctx, s.fdst, s.dstEmptyDirs)) } } @@ -713,7 +713,7 @@ func (s *syncCopyMove) run() error { // if DoMove and --delete-empty-src-dirs flag is set if s.DoMove && s.deleteEmptySrcDirs { //delete empty subdirectories that were part of the move - s.processError(deleteEmptyDirectories(s.fsrc, s.srcEmptyDirs)) + s.processError(deleteEmptyDirectories(s.ctx, s.fsrc, s.srcEmptyDirs)) } // cancel the context to free resources @@ -802,7 +802,7 @@ func (s *syncCopyMove) SrcOnly(src fs.DirEntry) (recurse bool) { } // Match is called when src and dst are present, so sync src to dst -func (s *syncCopyMove) Match(dst, src fs.DirEntry) (recurse bool) { +func (s *syncCopyMove) Match(ctx context.Context, dst, src fs.DirEntry) (recurse bool) { switch srcX := src.(type) { case fs.Object: s.srcEmptyDirsMu.Lock() @@ -852,7 +852,7 @@ func (s *syncCopyMove) Match(dst, src fs.DirEntry) (recurse bool) { // If DoMove is true then files will be moved instead of copied // // dir is the start directory, "" for root -func runSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) 
error { +func runSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error { if deleteMode != fs.DeleteModeOff && DoMove { return fserrors.FatalError(errors.New("can't delete and move at the same time")) } @@ -862,7 +862,7 @@ func runSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, de return fserrors.FatalError(errors.New("can't use --delete-before with --track-renames")) } // only delete stuff during in this pass - do, err := newSyncCopyMove(fdst, fsrc, fs.DeleteModeOnly, false, deleteEmptySrcDirs, copyEmptySrcDirs) + do, err := newSyncCopyMove(ctx, fdst, fsrc, fs.DeleteModeOnly, false, deleteEmptySrcDirs, copyEmptySrcDirs) if err != nil { return err } @@ -873,7 +873,7 @@ func runSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, de // Next pass does a copy only deleteMode = fs.DeleteModeOff } - do, err := newSyncCopyMove(fdst, fsrc, deleteMode, DoMove, deleteEmptySrcDirs, copyEmptySrcDirs) + do, err := newSyncCopyMove(ctx, fdst, fsrc, deleteMode, DoMove, deleteEmptySrcDirs, copyEmptySrcDirs) if err != nil { return err } @@ -881,22 +881,22 @@ func runSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, de } // Sync fsrc into fdst -func Sync(fdst, fsrc fs.Fs, copyEmptySrcDirs bool) error { - return runSyncCopyMove(fdst, fsrc, fs.Config.DeleteMode, false, false, copyEmptySrcDirs) +func Sync(ctx context.Context, fdst, fsrc fs.Fs, copyEmptySrcDirs bool) error { + return runSyncCopyMove(ctx, fdst, fsrc, fs.Config.DeleteMode, false, false, copyEmptySrcDirs) } // CopyDir copies fsrc into fdst -func CopyDir(fdst, fsrc fs.Fs, copyEmptySrcDirs bool) error { - return runSyncCopyMove(fdst, fsrc, fs.DeleteModeOff, false, false, copyEmptySrcDirs) +func CopyDir(ctx context.Context, fdst, fsrc fs.Fs, copyEmptySrcDirs bool) error { + return runSyncCopyMove(ctx, fdst, fsrc, fs.DeleteModeOff, false, false, copyEmptySrcDirs) } // moveDir 
moves fsrc into fdst -func moveDir(fdst, fsrc fs.Fs, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error { - return runSyncCopyMove(fdst, fsrc, fs.DeleteModeOff, true, deleteEmptySrcDirs, copyEmptySrcDirs) +func moveDir(ctx context.Context, fdst, fsrc fs.Fs, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error { + return runSyncCopyMove(ctx, fdst, fsrc, fs.DeleteModeOff, true, deleteEmptySrcDirs, copyEmptySrcDirs) } // MoveDir moves fsrc into fdst -func MoveDir(fdst, fsrc fs.Fs, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error { +func MoveDir(ctx context.Context, fdst, fsrc fs.Fs, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error { if operations.Same(fdst, fsrc) { fs.Errorf(fdst, "Nothing to do as source and destination are the same") return nil @@ -909,7 +909,7 @@ func MoveDir(fdst, fsrc fs.Fs, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) e return nil } fs.Debugf(fdst, "Using server side directory move") - err := fdstDirMove(fsrc, "", "") + err := fdstDirMove(ctx, fsrc, "", "") switch err { case fs.ErrorCantDirMove, fs.ErrorDirExists: fs.Infof(fdst, "Server side directory move failed - fallback to file moves: %v", err) @@ -924,5 +924,5 @@ func MoveDir(fdst, fsrc fs.Fs, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) e } // Otherwise move the files one by one - return moveDir(fdst, fsrc, deleteEmptySrcDirs, copyEmptySrcDirs) + return moveDir(ctx, fdst, fsrc, deleteEmptySrcDirs, copyEmptySrcDirs) } diff --git a/fs/sync/sync_test.go b/fs/sync/sync_test.go index 45db3c2d1..95653ea69 100644 --- a/fs/sync/sync_test.go +++ b/fs/sync/sync_test.go @@ -3,6 +3,7 @@ package sync import ( + "context" "runtime" "testing" "time" @@ -38,10 +39,10 @@ func TestCopyWithDryRun(t *testing.T) { r := fstest.NewRun(t) defer r.Finalise() file1 := r.WriteFile("sub dir/hello world", "hello world", t1) - r.Mkdir(r.Fremote) + r.Mkdir(context.Background(), r.Fremote) fs.Config.DryRun = true - err := CopyDir(r.Fremote, r.Flocal, false) + err := CopyDir(context.Background(), 
r.Fremote, r.Flocal, false) fs.Config.DryRun = false require.NoError(t, err) @@ -54,9 +55,9 @@ func TestCopy(t *testing.T) { r := fstest.NewRun(t) defer r.Finalise() file1 := r.WriteFile("sub dir/hello world", "hello world", t1) - r.Mkdir(r.Fremote) + r.Mkdir(context.Background(), r.Fremote) - err := CopyDir(r.Fremote, r.Flocal, false) + err := CopyDir(context.Background(), r.Fremote, r.Flocal, false) require.NoError(t, err) fstest.CheckItems(t, r.Flocal, file1) @@ -73,7 +74,7 @@ func TestCopyNoTraverse(t *testing.T) { file1 := r.WriteFile("sub dir/hello world", "hello world", t1) - err := CopyDir(r.Fremote, r.Flocal, false) + err := CopyDir(context.Background(), r.Fremote, r.Flocal, false) require.NoError(t, err) fstest.CheckItems(t, r.Flocal, file1) @@ -91,7 +92,7 @@ func TestSyncNoTraverse(t *testing.T) { file1 := r.WriteFile("sub dir/hello world", "hello world", t1) accounting.Stats.ResetCounters() - err := Sync(r.Fremote, r.Flocal, false) + err := Sync(context.Background(), r.Fremote, r.Flocal, false) require.NoError(t, err) fstest.CheckItems(t, r.Flocal, file1) @@ -109,7 +110,7 @@ func TestCopyWithDepth(t *testing.T) { fs.Config.MaxDepth = 1 defer func() { fs.Config.MaxDepth = -1 }() - err := CopyDir(r.Fremote, r.Flocal, false) + err := CopyDir(context.Background(), r.Fremote, r.Flocal, false) require.NoError(t, err) fstest.CheckItems(t, r.Flocal, file1, file2) @@ -140,7 +141,7 @@ func testCopyWithFilesFrom(t *testing.T, noTraverse bool) { } defer unpatch() - err = CopyDir(r.Fremote, r.Flocal, false) + err = CopyDir(context.Background(), r.Fremote, r.Flocal, false) require.NoError(t, err) unpatch() @@ -155,11 +156,11 @@ func TestCopyEmptyDirectories(t *testing.T) { r := fstest.NewRun(t) defer r.Finalise() file1 := r.WriteFile("sub dir/hello world", "hello world", t1) - err := operations.Mkdir(r.Flocal, "sub dir2") + err := operations.Mkdir(context.Background(), r.Flocal, "sub dir2") require.NoError(t, err) - r.Mkdir(r.Fremote) + r.Mkdir(context.Background(), 
r.Fremote) - err = CopyDir(r.Fremote, r.Flocal, true) + err = CopyDir(context.Background(), r.Fremote, r.Flocal, true) require.NoError(t, err) fstest.CheckListingWithPrecision( @@ -181,11 +182,11 @@ func TestMoveEmptyDirectories(t *testing.T) { r := fstest.NewRun(t) defer r.Finalise() file1 := r.WriteFile("sub dir/hello world", "hello world", t1) - err := operations.Mkdir(r.Flocal, "sub dir2") + err := operations.Mkdir(context.Background(), r.Flocal, "sub dir2") require.NoError(t, err) - r.Mkdir(r.Fremote) + r.Mkdir(context.Background(), r.Fremote) - err = MoveDir(r.Fremote, r.Flocal, false, true) + err = MoveDir(context.Background(), r.Fremote, r.Flocal, false, true) require.NoError(t, err) fstest.CheckListingWithPrecision( @@ -207,11 +208,11 @@ func TestSyncEmptyDirectories(t *testing.T) { r := fstest.NewRun(t) defer r.Finalise() file1 := r.WriteFile("sub dir/hello world", "hello world", t1) - err := operations.Mkdir(r.Flocal, "sub dir2") + err := operations.Mkdir(context.Background(), r.Flocal, "sub dir2") require.NoError(t, err) - r.Mkdir(r.Fremote) + r.Mkdir(context.Background(), r.Fremote) - err = Sync(r.Fremote, r.Flocal, true) + err = Sync(context.Background(), r.Fremote, r.Flocal, true) require.NoError(t, err) fstest.CheckListingWithPrecision( @@ -232,7 +233,7 @@ func TestSyncEmptyDirectories(t *testing.T) { func TestServerSideCopy(t *testing.T) { r := fstest.NewRun(t) defer r.Finalise() - file1 := r.WriteObject("sub dir/hello world", "hello world", t1) + file1 := r.WriteObject(context.Background(), "sub dir/hello world", "hello world", t1) fstest.CheckItems(t, r.Fremote, file1) FremoteCopy, _, finaliseCopy, err := fstest.RandomRemote(*fstest.RemoteName, *fstest.SubDir) @@ -240,7 +241,7 @@ func TestServerSideCopy(t *testing.T) { defer finaliseCopy() t.Logf("Server side copy (if possible) %v -> %v", r.Fremote, FremoteCopy) - err = CopyDir(FremoteCopy, r.Fremote, false) + err = CopyDir(context.Background(), FremoteCopy, r.Fremote, false) require.NoError(t, 
err) fstest.CheckItems(t, FremoteCopy, file1) @@ -251,14 +252,14 @@ func TestServerSideCopy(t *testing.T) { func TestCopyAfterDelete(t *testing.T) { r := fstest.NewRun(t) defer r.Finalise() - file1 := r.WriteObject("sub dir/hello world", "hello world", t1) + file1 := r.WriteObject(context.Background(), "sub dir/hello world", "hello world", t1) fstest.CheckItems(t, r.Flocal) fstest.CheckItems(t, r.Fremote, file1) - err := operations.Mkdir(r.Flocal, "") + err := operations.Mkdir(context.Background(), r.Flocal, "") require.NoError(t, err) - err = CopyDir(r.Fremote, r.Flocal, false) + err = CopyDir(context.Background(), r.Fremote, r.Flocal, false) require.NoError(t, err) fstest.CheckItems(t, r.Flocal) @@ -269,10 +270,10 @@ func TestCopyAfterDelete(t *testing.T) { func TestCopyRedownload(t *testing.T) { r := fstest.NewRun(t) defer r.Finalise() - file1 := r.WriteObject("sub dir/hello world", "hello world", t1) + file1 := r.WriteObject(context.Background(), "sub dir/hello world", "hello world", t1) fstest.CheckItems(t, r.Fremote, file1) - err := CopyDir(r.Flocal, r.Fremote, false) + err := CopyDir(context.Background(), r.Flocal, r.Fremote, false) require.NoError(t, err) // Test with combined precision of local and remote as we copied it there and back @@ -292,7 +293,7 @@ func TestSyncBasedOnCheckSum(t *testing.T) { fstest.CheckItems(t, r.Flocal, file1) accounting.Stats.ResetCounters() - err := Sync(r.Fremote, r.Flocal, false) + err := Sync(context.Background(), r.Fremote, r.Flocal, false) require.NoError(t, err) // We should have transferred exactly one file. 
@@ -304,7 +305,7 @@ func TestSyncBasedOnCheckSum(t *testing.T) { fstest.CheckItems(t, r.Flocal, file2) accounting.Stats.ResetCounters() - err = Sync(r.Fremote, r.Flocal, false) + err = Sync(context.Background(), r.Fremote, r.Flocal, false) require.NoError(t, err) // We should have transferred no files @@ -326,7 +327,7 @@ func TestSyncSizeOnly(t *testing.T) { fstest.CheckItems(t, r.Flocal, file1) accounting.Stats.ResetCounters() - err := Sync(r.Fremote, r.Flocal, false) + err := Sync(context.Background(), r.Fremote, r.Flocal, false) require.NoError(t, err) // We should have transferred exactly one file. @@ -338,7 +339,7 @@ func TestSyncSizeOnly(t *testing.T) { fstest.CheckItems(t, r.Flocal, file2) accounting.Stats.ResetCounters() - err = Sync(r.Fremote, r.Flocal, false) + err = Sync(context.Background(), r.Fremote, r.Flocal, false) require.NoError(t, err) // We should have transferred no files @@ -360,7 +361,7 @@ func TestSyncIgnoreSize(t *testing.T) { fstest.CheckItems(t, r.Flocal, file1) accounting.Stats.ResetCounters() - err := Sync(r.Fremote, r.Flocal, false) + err := Sync(context.Background(), r.Fremote, r.Flocal, false) require.NoError(t, err) // We should have transferred exactly one file. 
@@ -372,7 +373,7 @@ func TestSyncIgnoreSize(t *testing.T) { fstest.CheckItems(t, r.Flocal, file2) accounting.Stats.ResetCounters() - err = Sync(r.Fremote, r.Flocal, false) + err = Sync(context.Background(), r.Fremote, r.Flocal, false) require.NoError(t, err) // We should have transferred no files @@ -384,11 +385,11 @@ func TestSyncIgnoreSize(t *testing.T) { func TestSyncIgnoreTimes(t *testing.T) { r := fstest.NewRun(t) defer r.Finalise() - file1 := r.WriteBoth("existing", "potato", t1) + file1 := r.WriteBoth(context.Background(), "existing", "potato", t1) fstest.CheckItems(t, r.Fremote, file1) accounting.Stats.ResetCounters() - err := Sync(r.Fremote, r.Flocal, false) + err := Sync(context.Background(), r.Fremote, r.Flocal, false) require.NoError(t, err) // We should have transferred exactly 0 files because the @@ -399,7 +400,7 @@ func TestSyncIgnoreTimes(t *testing.T) { defer func() { fs.Config.IgnoreTimes = false }() accounting.Stats.ResetCounters() - err = Sync(r.Fremote, r.Flocal, false) + err = Sync(context.Background(), r.Fremote, r.Flocal, false) require.NoError(t, err) // We should have transferred exactly one file even though the @@ -419,7 +420,7 @@ func TestSyncIgnoreExisting(t *testing.T) { defer func() { fs.Config.IgnoreExisting = false }() accounting.Stats.ResetCounters() - err := Sync(r.Fremote, r.Flocal, false) + err := Sync(context.Background(), r.Fremote, r.Flocal, false) require.NoError(t, err) fstest.CheckItems(t, r.Flocal, file1) fstest.CheckItems(t, r.Fremote, file1) @@ -427,7 +428,7 @@ func TestSyncIgnoreExisting(t *testing.T) { // Change everything r.WriteFile("existing", "newpotatoes", t2) accounting.Stats.ResetCounters() - err = Sync(r.Fremote, r.Flocal, false) + err = Sync(context.Background(), r.Fremote, r.Flocal, false) require.NoError(t, err) // Items should not change fstest.CheckItems(t, r.Fremote, file1) @@ -441,9 +442,9 @@ func TestSyncIgnoreErrors(t *testing.T) { r.Finalise() }() file1 := r.WriteFile("a/potato2", 
"------------------------------------------------------------", t1) - file2 := r.WriteObject("b/potato", "SMALLER BUT SAME DATE", t2) - file3 := r.WriteBoth("c/non empty space", "AhHa!", t2) - require.NoError(t, operations.Mkdir(r.Fremote, "d")) + file2 := r.WriteObject(context.Background(), "b/potato", "SMALLER BUT SAME DATE", t2) + file3 := r.WriteBoth(context.Background(), "c/non empty space", "AhHa!", t2) + require.NoError(t, operations.Mkdir(context.Background(), r.Fremote, "d")) fstest.CheckListingWithPrecision( t, @@ -475,7 +476,7 @@ func TestSyncIgnoreErrors(t *testing.T) { accounting.Stats.ResetCounters() fs.CountError(errors.New("boom")) - assert.NoError(t, Sync(r.Fremote, r.Flocal, false)) + assert.NoError(t, Sync(context.Background(), r.Fremote, r.Flocal, false)) fstest.CheckListingWithPrecision( t, @@ -509,7 +510,7 @@ func TestSyncAfterChangingModtimeOnly(t *testing.T) { r := fstest.NewRun(t) defer r.Finalise() file1 := r.WriteFile("empty space", "", t2) - file2 := r.WriteObject("empty space", "", t1) + file2 := r.WriteObject(context.Background(), "empty space", "", t1) fstest.CheckItems(t, r.Flocal, file1) fstest.CheckItems(t, r.Fremote, file2) @@ -518,7 +519,7 @@ func TestSyncAfterChangingModtimeOnly(t *testing.T) { defer func() { fs.Config.DryRun = false }() accounting.Stats.ResetCounters() - err := Sync(r.Fremote, r.Flocal, false) + err := Sync(context.Background(), r.Fremote, r.Flocal, false) require.NoError(t, err) fstest.CheckItems(t, r.Flocal, file1) @@ -527,7 +528,7 @@ func TestSyncAfterChangingModtimeOnly(t *testing.T) { fs.Config.DryRun = false accounting.Stats.ResetCounters() - err = Sync(r.Fremote, r.Flocal, false) + err = Sync(context.Background(), r.Fremote, r.Flocal, false) require.NoError(t, err) fstest.CheckItems(t, r.Flocal, file1) @@ -549,13 +550,13 @@ func TestSyncAfterChangingModtimeOnlyWithNoUpdateModTime(t *testing.T) { }() file1 := r.WriteFile("empty space", "", t2) - file2 := r.WriteObject("empty space", "", t1) + file2 := 
r.WriteObject(context.Background(), "empty space", "", t1) fstest.CheckItems(t, r.Flocal, file1) fstest.CheckItems(t, r.Fremote, file2) accounting.Stats.ResetCounters() - err := Sync(r.Fremote, r.Flocal, false) + err := Sync(context.Background(), r.Fremote, r.Flocal, false) require.NoError(t, err) fstest.CheckItems(t, r.Flocal, file1) @@ -570,13 +571,13 @@ func TestSyncDoesntUpdateModtime(t *testing.T) { } file1 := r.WriteFile("foo", "foo", t2) - file2 := r.WriteObject("foo", "bar", t1) + file2 := r.WriteObject(context.Background(), "foo", "bar", t1) fstest.CheckItems(t, r.Flocal, file1) fstest.CheckItems(t, r.Fremote, file2) accounting.Stats.ResetCounters() - err := Sync(r.Fremote, r.Flocal, false) + err := Sync(context.Background(), r.Fremote, r.Flocal, false) require.NoError(t, err) fstest.CheckItems(t, r.Flocal, file1) @@ -589,14 +590,14 @@ func TestSyncDoesntUpdateModtime(t *testing.T) { func TestSyncAfterAddingAFile(t *testing.T) { r := fstest.NewRun(t) defer r.Finalise() - file1 := r.WriteBoth("empty space", "", t2) + file1 := r.WriteBoth(context.Background(), "empty space", "", t2) file2 := r.WriteFile("potato", "------------------------------------------------------------", t3) fstest.CheckItems(t, r.Flocal, file1, file2) fstest.CheckItems(t, r.Fremote, file1) accounting.Stats.ResetCounters() - err := Sync(r.Fremote, r.Flocal, false) + err := Sync(context.Background(), r.Fremote, r.Flocal, false) require.NoError(t, err) fstest.CheckItems(t, r.Flocal, file1, file2) fstest.CheckItems(t, r.Fremote, file1, file2) @@ -605,13 +606,13 @@ func TestSyncAfterAddingAFile(t *testing.T) { func TestSyncAfterChangingFilesSizeOnly(t *testing.T) { r := fstest.NewRun(t) defer r.Finalise() - file1 := r.WriteObject("potato", "------------------------------------------------------------", t3) + file1 := r.WriteObject(context.Background(), "potato", "------------------------------------------------------------", t3) file2 := r.WriteFile("potato", "smaller but same date", t3) 
fstest.CheckItems(t, r.Fremote, file1) fstest.CheckItems(t, r.Flocal, file2) accounting.Stats.ResetCounters() - err := Sync(r.Fremote, r.Flocal, false) + err := Sync(context.Background(), r.Fremote, r.Flocal, false) require.NoError(t, err) fstest.CheckItems(t, r.Flocal, file2) fstest.CheckItems(t, r.Fremote, file2) @@ -625,16 +626,16 @@ func TestSyncAfterChangingContentsOnly(t *testing.T) { var file1 fstest.Item if r.Fremote.Precision() == fs.ModTimeNotSupported { t.Logf("ModTimeNotSupported so forcing file to be a different size") - file1 = r.WriteObject("potato", "different size to make sure it syncs", t3) + file1 = r.WriteObject(context.Background(), "potato", "different size to make sure it syncs", t3) } else { - file1 = r.WriteObject("potato", "smaller but same date", t3) + file1 = r.WriteObject(context.Background(), "potato", "smaller but same date", t3) } file2 := r.WriteFile("potato", "SMALLER BUT SAME DATE", t2) fstest.CheckItems(t, r.Fremote, file1) fstest.CheckItems(t, r.Flocal, file2) accounting.Stats.ResetCounters() - err := Sync(r.Fremote, r.Flocal, false) + err := Sync(context.Background(), r.Fremote, r.Flocal, false) require.NoError(t, err) fstest.CheckItems(t, r.Flocal, file2) fstest.CheckItems(t, r.Fremote, file2) @@ -645,12 +646,12 @@ func TestSyncAfterRemovingAFileAndAddingAFileDryRun(t *testing.T) { r := fstest.NewRun(t) defer r.Finalise() file1 := r.WriteFile("potato2", "------------------------------------------------------------", t1) - file2 := r.WriteObject("potato", "SMALLER BUT SAME DATE", t2) - file3 := r.WriteBoth("empty space", "", t2) + file2 := r.WriteObject(context.Background(), "potato", "SMALLER BUT SAME DATE", t2) + file3 := r.WriteBoth(context.Background(), "empty space", "", t2) fs.Config.DryRun = true accounting.Stats.ResetCounters() - err := Sync(r.Fremote, r.Flocal, false) + err := Sync(context.Background(), r.Fremote, r.Flocal, false) fs.Config.DryRun = false require.NoError(t, err) @@ -663,13 +664,13 @@ func 
TestSyncAfterRemovingAFileAndAddingAFile(t *testing.T) { r := fstest.NewRun(t) defer r.Finalise() file1 := r.WriteFile("potato2", "------------------------------------------------------------", t1) - file2 := r.WriteObject("potato", "SMALLER BUT SAME DATE", t2) - file3 := r.WriteBoth("empty space", "", t2) + file2 := r.WriteObject(context.Background(), "potato", "SMALLER BUT SAME DATE", t2) + file3 := r.WriteBoth(context.Background(), "empty space", "", t2) fstest.CheckItems(t, r.Fremote, file2, file3) fstest.CheckItems(t, r.Flocal, file1, file3) accounting.Stats.ResetCounters() - err := Sync(r.Fremote, r.Flocal, false) + err := Sync(context.Background(), r.Fremote, r.Flocal, false) require.NoError(t, err) fstest.CheckItems(t, r.Flocal, file1, file3) fstest.CheckItems(t, r.Fremote, file1, file3) @@ -680,10 +681,10 @@ func TestSyncAfterRemovingAFileAndAddingAFileSubDir(t *testing.T) { r := fstest.NewRun(t) defer r.Finalise() file1 := r.WriteFile("a/potato2", "------------------------------------------------------------", t1) - file2 := r.WriteObject("b/potato", "SMALLER BUT SAME DATE", t2) - file3 := r.WriteBoth("c/non empty space", "AhHa!", t2) - require.NoError(t, operations.Mkdir(r.Fremote, "d")) - require.NoError(t, operations.Mkdir(r.Fremote, "d/e")) + file2 := r.WriteObject(context.Background(), "b/potato", "SMALLER BUT SAME DATE", t2) + file3 := r.WriteBoth(context.Background(), "c/non empty space", "AhHa!", t2) + require.NoError(t, operations.Mkdir(context.Background(), r.Fremote, "d")) + require.NoError(t, operations.Mkdir(context.Background(), r.Fremote, "d/e")) fstest.CheckListingWithPrecision( t, @@ -715,7 +716,7 @@ func TestSyncAfterRemovingAFileAndAddingAFileSubDir(t *testing.T) { ) accounting.Stats.ResetCounters() - err := Sync(r.Fremote, r.Flocal, false) + err := Sync(context.Background(), r.Fremote, r.Flocal, false) require.NoError(t, err) fstest.CheckListingWithPrecision( @@ -751,9 +752,9 @@ func 
TestSyncAfterRemovingAFileAndAddingAFileSubDirWithErrors(t *testing.T) { r := fstest.NewRun(t) defer r.Finalise() file1 := r.WriteFile("a/potato2", "------------------------------------------------------------", t1) - file2 := r.WriteObject("b/potato", "SMALLER BUT SAME DATE", t2) - file3 := r.WriteBoth("c/non empty space", "AhHa!", t2) - require.NoError(t, operations.Mkdir(r.Fremote, "d")) + file2 := r.WriteObject(context.Background(), "b/potato", "SMALLER BUT SAME DATE", t2) + file3 := r.WriteBoth(context.Background(), "c/non empty space", "AhHa!", t2) + require.NoError(t, operations.Mkdir(context.Background(), r.Fremote, "d")) fstest.CheckListingWithPrecision( t, @@ -785,7 +786,7 @@ func TestSyncAfterRemovingAFileAndAddingAFileSubDirWithErrors(t *testing.T) { accounting.Stats.ResetCounters() fs.CountError(errors.New("boom")) - err := Sync(r.Fremote, r.Flocal, false) + err := Sync(context.Background(), r.Fremote, r.Flocal, false) assert.Equal(t, fs.ErrorNotDeleting, err) fstest.CheckListingWithPrecision( @@ -856,13 +857,13 @@ func TestCopyDeleteBefore(t *testing.T) { fs.Config.DeleteMode = fs.DeleteModeDefault }() - file1 := r.WriteObject("potato", "hopefully not deleted", t1) + file1 := r.WriteObject(context.Background(), "potato", "hopefully not deleted", t1) file2 := r.WriteFile("potato2", "hopefully copied in", t1) fstest.CheckItems(t, r.Fremote, file1) fstest.CheckItems(t, r.Flocal, file2) accounting.Stats.ResetCounters() - err := CopyDir(r.Fremote, r.Flocal, false) + err := CopyDir(context.Background(), r.Fremote, r.Flocal, false) require.NoError(t, err) fstest.CheckItems(t, r.Fremote, file1, file2) @@ -873,8 +874,8 @@ func TestCopyDeleteBefore(t *testing.T) { func TestSyncWithExclude(t *testing.T) { r := fstest.NewRun(t) defer r.Finalise() - file1 := r.WriteBoth("potato2", "------------------------------------------------------------", t1) - file2 := r.WriteBoth("empty space", "", t2) + file1 := r.WriteBoth(context.Background(), "potato2", 
"------------------------------------------------------------", t1) + file2 := r.WriteBoth(context.Background(), "empty space", "", t2) file3 := r.WriteFile("enormous", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", t1) // 100 bytes fstest.CheckItems(t, r.Fremote, file1, file2) fstest.CheckItems(t, r.Flocal, file1, file2, file3) @@ -885,14 +886,14 @@ func TestSyncWithExclude(t *testing.T) { }() accounting.Stats.ResetCounters() - err := Sync(r.Fremote, r.Flocal, false) + err := Sync(context.Background(), r.Fremote, r.Flocal, false) require.NoError(t, err) fstest.CheckItems(t, r.Fremote, file2, file1) // Now sync the other way round and check enormous doesn't get // deleted as it is excluded from the sync accounting.Stats.ResetCounters() - err = Sync(r.Flocal, r.Fremote, false) + err = Sync(context.Background(), r.Flocal, r.Fremote, false) require.NoError(t, err) fstest.CheckItems(t, r.Flocal, file2, file1, file3) } @@ -901,9 +902,9 @@ func TestSyncWithExclude(t *testing.T) { func TestSyncWithExcludeAndDeleteExcluded(t *testing.T) { r := fstest.NewRun(t) defer r.Finalise() - file1 := r.WriteBoth("potato2", "------------------------------------------------------------", t1) // 60 bytes - file2 := r.WriteBoth("empty space", "", t2) - file3 := r.WriteBoth("enormous", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", t1) // 100 bytes + file1 := r.WriteBoth(context.Background(), "potato2", "------------------------------------------------------------", t1) // 60 bytes + file2 := r.WriteBoth(context.Background(), "empty space", "", t2) + file3 := r.WriteBoth(context.Background(), "enormous", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", t1) // 100 bytes fstest.CheckItems(t, r.Fremote, file1, file2, file3) fstest.CheckItems(t, r.Flocal, file1, file2, file3) @@ -915,14 +916,14 @@ func 
TestSyncWithExcludeAndDeleteExcluded(t *testing.T) { }() accounting.Stats.ResetCounters() - err := Sync(r.Fremote, r.Flocal, false) + err := Sync(context.Background(), r.Fremote, r.Flocal, false) require.NoError(t, err) fstest.CheckItems(t, r.Fremote, file2) // Check sync the other way round to make sure enormous gets // deleted even though it is excluded accounting.Stats.ResetCounters() - err = Sync(r.Flocal, r.Fremote, false) + err = Sync(context.Background(), r.Flocal, r.Fremote, false) require.NoError(t, err) fstest.CheckItems(t, r.Flocal, file2) } @@ -942,10 +943,10 @@ func TestSyncWithUpdateOlder(t *testing.T) { fourF := r.WriteFile("four", "four", t2) fiveF := r.WriteFile("five", "five", t2) fstest.CheckItems(t, r.Flocal, oneF, twoF, threeF, fourF, fiveF) - oneO := r.WriteObject("one", "ONE", t2) - twoO := r.WriteObject("two", "TWO", t2) - threeO := r.WriteObject("three", "THREE", t2plus) - fourO := r.WriteObject("four", "FOURFOUR", t2minus) + oneO := r.WriteObject(context.Background(), "one", "ONE", t2) + twoO := r.WriteObject(context.Background(), "two", "TWO", t2) + threeO := r.WriteObject(context.Background(), "three", "THREE", t2plus) + fourO := r.WriteObject(context.Background(), "four", "FOURFOUR", t2minus) fstest.CheckItems(t, r.Fremote, oneO, twoO, threeO, fourO) fs.Config.UpdateOlder = true @@ -957,7 +958,7 @@ func TestSyncWithUpdateOlder(t *testing.T) { }() accounting.Stats.ResetCounters() - err := Sync(r.Fremote, r.Flocal, false) + err := Sync(context.Background(), r.Fremote, r.Flocal, false) require.NoError(t, err) fstest.CheckItems(t, r.Fremote, oneO, twoF, threeO, fourF, fiveF) } @@ -981,7 +982,7 @@ func TestSyncWithTrackRenames(t *testing.T) { f2 := r.WriteFile("yam", "Yam Content", t2) accounting.Stats.ResetCounters() - require.NoError(t, Sync(r.Fremote, r.Flocal, false)) + require.NoError(t, Sync(context.Background(), r.Fremote, r.Flocal, false)) fstest.CheckItems(t, r.Fremote, f1, f2) fstest.CheckItems(t, r.Flocal, f1, f2) @@ -990,7 +991,7 
@@ func TestSyncWithTrackRenames(t *testing.T) { f2 = r.RenameFile(f2, "yaml") accounting.Stats.ResetCounters() - require.NoError(t, Sync(r.Fremote, r.Flocal, false)) + require.NoError(t, Sync(context.Background(), r.Fremote, r.Flocal, false)) fstest.CheckItems(t, r.Fremote, f1, f2) @@ -1015,12 +1016,12 @@ func testServerSideMove(t *testing.T, r *fstest.Run, withFilter, testDeleteEmpty require.NoError(t, err) defer finaliseMove() - file1 := r.WriteBoth("potato2", "------------------------------------------------------------", t1) - file2 := r.WriteBoth("empty space", "", t2) - file3u := r.WriteBoth("potato3", "------------------------------------------------------------ UPDATED", t2) + file1 := r.WriteBoth(context.Background(), "potato2", "------------------------------------------------------------", t1) + file2 := r.WriteBoth(context.Background(), "empty space", "", t2) + file3u := r.WriteBoth(context.Background(), "potato3", "------------------------------------------------------------ UPDATED", t2) if testDeleteEmptyDirs { - err := operations.Mkdir(r.Fremote, "tomatoDir") + err := operations.Mkdir(context.Background(), r.Fremote, "tomatoDir") require.NoError(t, err) } @@ -1029,13 +1030,13 @@ func testServerSideMove(t *testing.T, r *fstest.Run, withFilter, testDeleteEmpty t.Logf("Server side move (if possible) %v -> %v", r.Fremote, FremoteMove) // Write just one file in the new remote - r.WriteObjectTo(FremoteMove, "empty space", "", t2, false) - file3 := r.WriteObjectTo(FremoteMove, "potato3", "------------------------------------------------------------", t1, false) + r.WriteObjectTo(context.Background(), FremoteMove, "empty space", "", t2, false) + file3 := r.WriteObjectTo(context.Background(), FremoteMove, "potato3", "------------------------------------------------------------", t1, false) fstest.CheckItems(t, FremoteMove, file2, file3) // Do server side move accounting.Stats.ResetCounters() - err = MoveDir(FremoteMove, r.Fremote, testDeleteEmptyDirs, 
false) + err = MoveDir(context.Background(), FremoteMove, r.Fremote, testDeleteEmptyDirs, false) require.NoError(t, err) if withFilter { @@ -1056,13 +1057,13 @@ func testServerSideMove(t *testing.T, r *fstest.Run, withFilter, testDeleteEmpty defer finaliseMove2() if testDeleteEmptyDirs { - err := operations.Mkdir(FremoteMove, "tomatoDir") + err := operations.Mkdir(context.Background(), FremoteMove, "tomatoDir") require.NoError(t, err) } // Move it back to a new empty remote, dst does not exist this time accounting.Stats.ResetCounters() - err = MoveDir(FremoteMove2, FremoteMove, testDeleteEmptyDirs, false) + err = MoveDir(context.Background(), FremoteMove2, FremoteMove, testDeleteEmptyDirs, false) require.NoError(t, err) if withFilter { @@ -1084,10 +1085,10 @@ func TestMoveWithDeleteEmptySrcDirs(t *testing.T) { defer r.Finalise() file1 := r.WriteFile("sub dir/hello world", "hello world", t1) file2 := r.WriteFile("nested/sub dir/file", "nested", t1) - r.Mkdir(r.Fremote) + r.Mkdir(context.Background(), r.Fremote) // run move with --delete-empty-src-dirs - err := MoveDir(r.Fremote, r.Flocal, true, false) + err := MoveDir(context.Background(), r.Fremote, r.Flocal, true, false) require.NoError(t, err) fstest.CheckListingWithPrecision( @@ -1105,9 +1106,9 @@ func TestMoveWithoutDeleteEmptySrcDirs(t *testing.T) { defer r.Finalise() file1 := r.WriteFile("sub dir/hello world", "hello world", t1) file2 := r.WriteFile("nested/sub dir/file", "nested", t1) - r.Mkdir(r.Fremote) + r.Mkdir(context.Background(), r.Fremote) - err := MoveDir(r.Fremote, r.Flocal, false, false) + err := MoveDir(context.Background(), r.Fremote, r.Flocal, false, false) require.NoError(t, err) fstest.CheckListingWithPrecision( @@ -1164,11 +1165,11 @@ func TestServerSideMoveOverlap(t *testing.T) { FremoteMove, err := fs.NewFs(subRemoteName) require.NoError(t, err) - file1 := r.WriteObject("potato2", "------------------------------------------------------------", t1) + file1 := 
r.WriteObject(context.Background(), "potato2", "------------------------------------------------------------", t1) fstest.CheckItems(t, r.Fremote, file1) // Subdir move with no filters should return ErrorCantMoveOverlapping - err = MoveDir(FremoteMove, r.Fremote, false, false) + err = MoveDir(context.Background(), FremoteMove, r.Fremote, false, false) assert.EqualError(t, err, fs.ErrorOverlapping.Error()) // Now try with a filter which should also fail with ErrorCantMoveOverlapping @@ -1176,7 +1177,7 @@ func TestServerSideMoveOverlap(t *testing.T) { defer func() { filter.Active.Opt.MinSize = -1 }() - err = MoveDir(FremoteMove, r.Fremote, false, false) + err = MoveDir(context.Background(), FremoteMove, r.Fremote, false, false) assert.EqualError(t, err, fs.ErrorOverlapping.Error()) } @@ -1195,10 +1196,10 @@ func TestSyncOverlap(t *testing.T) { assert.Equal(t, fs.ErrorOverlapping.Error(), err.Error()) } - checkErr(Sync(FremoteSync, r.Fremote, false)) - checkErr(Sync(r.Fremote, FremoteSync, false)) - checkErr(Sync(r.Fremote, r.Fremote, false)) - checkErr(Sync(FremoteSync, FremoteSync, false)) + checkErr(Sync(context.Background(), FremoteSync, r.Fremote, false)) + checkErr(Sync(context.Background(), r.Fremote, FremoteSync, false)) + checkErr(Sync(context.Background(), r.Fremote, r.Fremote, false)) + checkErr(Sync(context.Background(), FremoteSync, FremoteSync, false)) } // Test with BackupDir set @@ -1209,7 +1210,7 @@ func testSyncBackupDir(t *testing.T, suffix string, suffixKeepExtension bool) { if !operations.CanServerSideMove(r.Fremote) { t.Skip("Skipping test as remote does not support server side move") } - r.Mkdir(r.Fremote) + r.Mkdir(context.Background(), r.Fremote) fs.Config.BackupDir = r.FremoteName + "/backup" fs.Config.Suffix = suffix @@ -1222,9 +1223,9 @@ func testSyncBackupDir(t *testing.T, suffix string, suffixKeepExtension bool) { // Make the setup so we have one, two, three in the dest // and one (different), two (same) in the source - file1 := 
r.WriteObject("dst/one", "one", t1) - file2 := r.WriteObject("dst/two", "two", t1) - file3 := r.WriteObject("dst/three.txt", "three", t1) + file1 := r.WriteObject(context.Background(), "dst/one", "one", t1) + file2 := r.WriteObject(context.Background(), "dst/two", "two", t1) + file3 := r.WriteObject(context.Background(), "dst/three.txt", "three", t1) file2a := r.WriteFile("two", "two", t1) file1a := r.WriteFile("one", "oneA", t2) @@ -1235,7 +1236,7 @@ func testSyncBackupDir(t *testing.T, suffix string, suffixKeepExtension bool) { require.NoError(t, err) accounting.Stats.ResetCounters() - err = Sync(fdst, r.Flocal, false) + err = Sync(context.Background(), fdst, r.Flocal, false) require.NoError(t, err) // one should be moved to the backup dir and the new one installed @@ -1253,14 +1254,14 @@ func testSyncBackupDir(t *testing.T, suffix string, suffixKeepExtension bool) { // Now check what happens if we do it again // Restore a different three and update one in the source - file3a := r.WriteObject("dst/three.txt", "threeA", t2) + file3a := r.WriteObject(context.Background(), "dst/three.txt", "threeA", t2) file1b := r.WriteFile("one", "oneBB", t3) fstest.CheckItems(t, r.Fremote, file1, file2, file3, file1a, file3a) // This should delete three and overwrite one again, checking // the files got overwritten correctly in backup-dir accounting.Stats.ResetCounters() - err = Sync(fdst, r.Flocal, false) + err = Sync(context.Background(), fdst, r.Flocal, false) require.NoError(t, err) // one should be moved to the backup dir and the new one installed @@ -1298,11 +1299,11 @@ func TestSyncUTFNorm(t *testing.T) { file1 := r.WriteFile(Encoding1, "This is a test", t1) fstest.CheckItems(t, r.Flocal, file1) - file2 := r.WriteObject(Encoding2, "This is a old test", t2) + file2 := r.WriteObject(context.Background(), Encoding2, "This is a old test", t2) fstest.CheckItems(t, r.Fremote, file2) accounting.Stats.ResetCounters() - err := Sync(r.Fremote, r.Flocal, false) + err := 
Sync(context.Background(), r.Fremote, r.Flocal, false) require.NoError(t, err) // We should have transferred exactly one file, but kept the @@ -1328,7 +1329,7 @@ func TestSyncImmutable(t *testing.T) { // Should succeed accounting.Stats.ResetCounters() - err := Sync(r.Fremote, r.Flocal, false) + err := Sync(context.Background(), r.Fremote, r.Flocal, false) require.NoError(t, err) fstest.CheckItems(t, r.Flocal, file1) fstest.CheckItems(t, r.Fremote, file1) @@ -1340,7 +1341,7 @@ func TestSyncImmutable(t *testing.T) { // Should fail with ErrorImmutableModified and not modify local or remote files accounting.Stats.ResetCounters() - err = Sync(r.Fremote, r.Flocal, false) + err = Sync(context.Background(), r.Fremote, r.Flocal, false) assert.EqualError(t, err, fs.ErrorImmutableModified.Error()) fstest.CheckItems(t, r.Flocal, file2) fstest.CheckItems(t, r.Fremote, file1) @@ -1362,12 +1363,12 @@ func TestSyncIgnoreCase(t *testing.T) { // Create files with different filename casing file1 := r.WriteFile("existing", "potato", t1) fstest.CheckItems(t, r.Flocal, file1) - file2 := r.WriteObject("EXISTING", "potato", t1) + file2 := r.WriteObject(context.Background(), "EXISTING", "potato", t1) fstest.CheckItems(t, r.Fremote, file2) // Should not copy files that are differently-cased but otherwise identical accounting.Stats.ResetCounters() - err := Sync(r.Fremote, r.Flocal, false) + err := Sync(context.Background(), r.Fremote, r.Flocal, false) require.NoError(t, err) fstest.CheckItems(t, r.Flocal, file1) fstest.CheckItems(t, r.Fremote, file2) @@ -1403,6 +1404,6 @@ func TestAbort(t *testing.T) { accounting.Stats.ResetCounters() - err := Sync(r.Fremote, r.Flocal, false) + err := Sync(context.Background(), r.Fremote, r.Flocal, false) assert.Equal(t, accounting.ErrorMaxTransferLimitReached, err) } diff --git a/fs/walk/walk.go b/fs/walk/walk.go index b6b5ddf12..de4990774 100644 --- a/fs/walk/walk.go +++ b/fs/walk/walk.go @@ -3,6 +3,7 @@ package walk import ( "bytes" + "context" "fmt" 
"path" "sort" @@ -58,15 +59,15 @@ type Func func(path string, entries fs.DirEntries, err error) error // constructed with just those files in and then walked with WalkR // // NB (f, path) to be replaced by fs.Dir at some point -func Walk(f fs.Fs, path string, includeAll bool, maxLevel int, fn Func) error { +func Walk(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel int, fn Func) error { if fs.Config.NoTraverse && filter.Active.HaveFilesFrom() { - return walkR(f, path, includeAll, maxLevel, fn, filter.Active.MakeListR(f.NewObject)) + return walkR(ctx, f, path, includeAll, maxLevel, fn, filter.Active.MakeListR(ctx, f.NewObject)) } // FIXME should this just be maxLevel < 0 - why the maxLevel > 1 if (maxLevel < 0 || maxLevel > 1) && fs.Config.UseListR && f.Features().ListR != nil { - return walkListR(f, path, includeAll, maxLevel, fn) + return walkListR(ctx, f, path, includeAll, maxLevel, fn) } - return walkListDirSorted(f, path, includeAll, maxLevel, fn) + return walkListDirSorted(ctx, f, path, includeAll, maxLevel, fn) } // ListType is uses to choose which combination of files or directories is requires @@ -137,7 +138,7 @@ func (l ListType) Filter(in *fs.DirEntries) { // efficient, otherwise by Walk. // // NB (f, path) to be replaced by fs.Dir at some point -func ListR(f fs.Fs, path string, includeAll bool, maxLevel int, listType ListType, fn fs.ListRCallback) error { +func ListR(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel int, listType ListType, fn fs.ListRCallback) error { // FIXME disable this with --no-fast-list ??? `--disable ListR` will do it... 
doListR := f.Features().ListR @@ -147,15 +148,15 @@ func ListR(f fs.Fs, path string, includeAll bool, maxLevel int, listType ListTyp maxLevel >= 0 || // ...using bounded recursion len(filter.Active.Opt.ExcludeFile) > 0 || // ...using --exclude-file filter.Active.BoundedRecursion() { // ...filters imply bounded recursion - return listRwalk(f, path, includeAll, maxLevel, listType, fn) + return listRwalk(ctx, f, path, includeAll, maxLevel, listType, fn) } - return listR(f, path, includeAll, listType, fn, doListR, listType.Dirs() && f.Features().BucketBased) + return listR(ctx, f, path, includeAll, listType, fn, doListR, listType.Dirs() && f.Features().BucketBased) } // listRwalk walks the file tree for ListR using Walk -func listRwalk(f fs.Fs, path string, includeAll bool, maxLevel int, listType ListType, fn fs.ListRCallback) error { +func listRwalk(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel int, listType ListType, fn fs.ListRCallback) error { var listErr error - walkErr := Walk(f, path, includeAll, maxLevel, func(path string, entries fs.DirEntries, err error) error { + walkErr := Walk(ctx, f, path, includeAll, maxLevel, func(path string, entries fs.DirEntries, err error) error { // Carry on listing but return the error at the end if err != nil { listErr = err @@ -264,8 +265,8 @@ func (dm *dirMap) sendEntries(fn fs.ListRCallback) (err error) { } // listR walks the file tree using ListR -func listR(f fs.Fs, path string, includeAll bool, listType ListType, fn fs.ListRCallback, doListR fs.ListRFn, synthesizeDirs bool) error { - includeDirectory := filter.Active.IncludeDirectory(f) +func listR(ctx context.Context, f fs.Fs, path string, includeAll bool, listType ListType, fn fs.ListRCallback, doListR fs.ListRFn, synthesizeDirs bool) error { + includeDirectory := filter.Active.IncludeDirectory(ctx, f) if !includeAll { includeAll = filter.Active.InActive() } @@ -274,7 +275,7 @@ func listR(f fs.Fs, path string, includeAll bool, listType ListType, fn 
fs.ListR dm = newDirMap(path) } var mu sync.Mutex - err := doListR(path, func(entries fs.DirEntries) (err error) { + err := doListR(ctx, path, func(entries fs.DirEntries) (err error) { if synthesizeDirs { err = dm.addEntries(entries) if err != nil { @@ -288,7 +289,7 @@ func listR(f fs.Fs, path string, includeAll bool, listType ListType, fn fs.ListR var include bool switch x := entry.(type) { case fs.Object: - include = filter.Active.IncludeObject(x) + include = filter.Active.IncludeObject(ctx, x) case fs.Directory: include, err = includeDirectory(x.Remote()) if err != nil { @@ -324,25 +325,25 @@ func listR(f fs.Fs, path string, includeAll bool, listType ListType, fn fs.ListR // walkListDirSorted lists the directory. // // It implements Walk using non recursive directory listing. -func walkListDirSorted(f fs.Fs, path string, includeAll bool, maxLevel int, fn Func) error { - return walk(f, path, includeAll, maxLevel, fn, list.DirSorted) +func walkListDirSorted(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel int, fn Func) error { + return walk(ctx, f, path, includeAll, maxLevel, fn, list.DirSorted) } // walkListR lists the directory. // // It implements Walk using recursive directory listing if // available, or returns ErrorCantListR if not. 
-func walkListR(f fs.Fs, path string, includeAll bool, maxLevel int, fn Func) error { +func walkListR(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel int, fn Func) error { listR := f.Features().ListR if listR == nil { return ErrorCantListR } - return walkR(f, path, includeAll, maxLevel, fn, listR) + return walkR(ctx, f, path, includeAll, maxLevel, fn, listR) } -type listDirFunc func(fs fs.Fs, includeAll bool, dir string) (entries fs.DirEntries, err error) +type listDirFunc func(ctx context.Context, fs fs.Fs, includeAll bool, dir string) (entries fs.DirEntries, err error) -func walk(f fs.Fs, path string, includeAll bool, maxLevel int, fn Func, listDir listDirFunc) error { +func walk(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel int, fn Func, listDir listDirFunc) error { var ( wg sync.WaitGroup // sync closing of go routines traversing sync.WaitGroup // running directory traversals @@ -378,7 +379,7 @@ func walk(f fs.Fs, path string, includeAll bool, maxLevel int, fn Func, listDir if !ok { return } - entries, err := listDir(f, includeAll, job.remote) + entries, err := listDir(ctx, f, includeAll, job.remote) var jobs []listJob if err == nil && job.depth != 0 { entries.ForDir(func(dir fs.Directory) { @@ -608,14 +609,14 @@ func (dt DirTree) String() string { return out.String() } -func walkRDirTree(f fs.Fs, startPath string, includeAll bool, maxLevel int, listR fs.ListRFn) (DirTree, error) { +func walkRDirTree(ctx context.Context, f fs.Fs, startPath string, includeAll bool, maxLevel int, listR fs.ListRFn) (DirTree, error) { dirs := make(DirTree) // Entries can come in arbitrary order. We use toPrune to keep // all directories to exclude later. 
toPrune := make(map[string]bool) - includeDirectory := filter.Active.IncludeDirectory(f) + includeDirectory := filter.Active.IncludeDirectory(ctx, f) var mu sync.Mutex - err := listR(startPath, func(entries fs.DirEntries) error { + err := listR(ctx, startPath, func(entries fs.DirEntries) error { mu.Lock() defer mu.Unlock() for _, entry := range entries { @@ -623,7 +624,7 @@ func walkRDirTree(f fs.Fs, startPath string, includeAll bool, maxLevel int, list switch x := entry.(type) { case fs.Object: // Make sure we don't delete excluded files if not required - if includeAll || filter.Active.IncludeObject(x) { + if includeAll || filter.Active.IncludeObject(ctx, x) { if maxLevel < 0 || slashes <= maxLevel-1 { dirs.add(x) } else { @@ -685,7 +686,7 @@ func walkRDirTree(f fs.Fs, startPath string, includeAll bool, maxLevel int, list } // Create a DirTree using List -func walkNDirTree(f fs.Fs, path string, includeAll bool, maxLevel int, listDir listDirFunc) (DirTree, error) { +func walkNDirTree(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel int, listDir listDirFunc) (DirTree, error) { dirs := make(DirTree) fn := func(dirPath string, entries fs.DirEntries, err error) error { if err == nil { @@ -693,7 +694,7 @@ func walkNDirTree(f fs.Fs, path string, includeAll bool, maxLevel int, listDir l } return err } - err := walk(f, path, includeAll, maxLevel, fn, listDir) + err := walk(ctx, f, path, includeAll, maxLevel, fn, listDir) if err != nil { return nil, err } @@ -715,18 +716,18 @@ func walkNDirTree(f fs.Fs, path string, includeAll bool, maxLevel int, listDir l // constructed with just those files in. 
// // NB (f, path) to be replaced by fs.Dir at some point -func NewDirTree(f fs.Fs, path string, includeAll bool, maxLevel int) (DirTree, error) { +func NewDirTree(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel int) (DirTree, error) { if fs.Config.NoTraverse && filter.Active.HaveFilesFrom() { - return walkRDirTree(f, path, includeAll, maxLevel, filter.Active.MakeListR(f.NewObject)) + return walkRDirTree(ctx, f, path, includeAll, maxLevel, filter.Active.MakeListR(ctx, f.NewObject)) } if ListR := f.Features().ListR; (maxLevel < 0 || maxLevel > 1) && ListR != nil { - return walkRDirTree(f, path, includeAll, maxLevel, ListR) + return walkRDirTree(ctx, f, path, includeAll, maxLevel, ListR) } - return walkNDirTree(f, path, includeAll, maxLevel, list.DirSorted) + return walkNDirTree(ctx, f, path, includeAll, maxLevel, list.DirSorted) } -func walkR(f fs.Fs, path string, includeAll bool, maxLevel int, fn Func, listR fs.ListRFn) error { - dirs, err := walkRDirTree(f, path, includeAll, maxLevel, listR) +func walkR(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel int, fn Func, listR fs.ListRFn) error { + dirs, err := walkRDirTree(ctx, f, path, includeAll, maxLevel, listR) if err != nil { return err } @@ -760,8 +761,8 @@ func walkR(f fs.Fs, path string, includeAll bool, maxLevel int, fn Func, listR f } // GetAll runs ListR getting all the results -func GetAll(f fs.Fs, path string, includeAll bool, maxLevel int) (objs []fs.Object, dirs []fs.Directory, err error) { - err = ListR(f, path, includeAll, maxLevel, ListAll, func(entries fs.DirEntries) error { +func GetAll(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel int) (objs []fs.Object, dirs []fs.Directory, err error) { + err = ListR(ctx, f, path, includeAll, maxLevel, ListAll, func(entries fs.DirEntries) error { for _, entry := range entries { switch x := entry.(type) { case fs.Object: diff --git a/fs/walk/walk_test.go b/fs/walk/walk_test.go index 
4e29b9961..3f194d625 100644 --- a/fs/walk/walk_test.go +++ b/fs/walk/walk_test.go @@ -1,6 +1,7 @@ package walk import ( + "context" "fmt" "io" "strings" @@ -68,7 +69,7 @@ func (ls *listDirs) SetLevel(maxLevel int) *listDirs { } // ListDir returns the expected listing for the directory -func (ls *listDirs) ListDir(f fs.Fs, includeAll bool, dir string) (entries fs.DirEntries, err error) { +func (ls *listDirs) ListDir(ctx context.Context, f fs.Fs, includeAll bool, dir string) (entries fs.DirEntries, err error) { ls.mu.Lock() defer ls.mu.Unlock() assert.Equal(ls.t, ls.fs, f) @@ -89,7 +90,7 @@ func (ls *listDirs) ListDir(f fs.Fs, includeAll bool, dir string) (entries fs.Di } // ListR returns the expected listing for the directory using ListR -func (ls *listDirs) ListR(dir string, callback fs.ListRCallback) (err error) { +func (ls *listDirs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { ls.mu.Lock() defer ls.mu.Unlock() @@ -150,14 +151,14 @@ func (ls *listDirs) WalkFn(dir string, entries fs.DirEntries, err error) error { // Walk does the walk and tests the expectations func (ls *listDirs) Walk() { - err := walk(nil, "", ls.includeAll, ls.maxLevel, ls.WalkFn, ls.ListDir) + err := walk(context.Background(), nil, "", ls.includeAll, ls.maxLevel, ls.WalkFn, ls.ListDir) assert.Equal(ls.t, ls.finalError, err) ls.IsFinished() } // WalkR does the walkR and tests the expectations func (ls *listDirs) WalkR() { - err := walkR(nil, "", ls.includeAll, ls.maxLevel, ls.WalkFn, ls.ListR) + err := walkR(context.Background(), nil, "", ls.includeAll, ls.maxLevel, ls.WalkFn, ls.ListR) assert.Equal(ls.t, ls.finalError, err) if ls.finalError == nil { ls.IsFinished() @@ -266,7 +267,7 @@ func TestWalkRLevelsNoRecursive10(t *testing.T) { testWalkLevels(t, 10).WalkR() func TestWalkNDirTree(t *testing.T) { ls := testWalkLevels(t, -1) - entries, err := walkNDirTree(nil, "", ls.includeAll, ls.maxLevel, ls.ListDir) + entries, err := walkNDirTree(context.Background(), 
nil, "", ls.includeAll, ls.maxLevel, ls.ListDir) require.NoError(t, err) assert.Equal(t, `/ A @@ -414,7 +415,7 @@ func TestWalkRMultiErrors(t *testing.T) { testWalkMultiErrors(t).Walk() } // a very simple listRcallback function func makeListRCallback(entries fs.DirEntries, err error) fs.ListRFn { - return func(dir string, callback fs.ListRCallback) error { + return func(ctx context.Context, dir string, callback fs.ListRCallback) error { if err == nil { err = callback(entries) } @@ -559,7 +560,7 @@ a/ b/ `, nil, "", 2}, } { - r, err := walkRDirTree(nil, test.root, true, test.level, makeListRCallback(test.entries, test.err)) + r, err := walkRDirTree(context.Background(), nil, test.root, true, test.level, makeListRCallback(test.entries, test.err)) assert.Equal(t, test.err, err, fmt.Sprintf("%+v", test)) assert.Equal(t, test.want, r.String(), fmt.Sprintf("%+v", test)) } @@ -630,7 +631,7 @@ b/c/d/ `, nil, "", -1, "ign", true}, } { filter.Active.Opt.ExcludeFile = test.excludeFile - r, err := walkRDirTree(nil, test.root, test.includeAll, test.level, makeListRCallback(test.entries, test.err)) + r, err := walkRDirTree(context.Background(), nil, test.root, test.includeAll, test.level, makeListRCallback(test.entries, test.err)) assert.Equal(t, test.err, err, fmt.Sprintf("%+v", test)) assert.Equal(t, test.want, r.String(), fmt.Sprintf("%+v", test)) } @@ -701,7 +702,7 @@ func TestListR(t *testing.T) { } return nil } - doListR := func(dir string, callback fs.ListRCallback) error { + doListR := func(ctx context.Context, dir string, callback fs.ListRCallback) error { var os fs.DirEntries for _, o := range objects { if dir == "" || strings.HasPrefix(o.Remote(), dir+"/") { @@ -725,43 +726,43 @@ func TestListR(t *testing.T) { // Base case clearCallback() - err = listR(f, "", true, ListAll, callback, doListR, false) + err = listR(context.Background(), f, "", true, ListAll, callback, doListR, false) require.NoError(t, err) require.Equal(t, []string{"a", "b", "dir", "dir/a", "dir/b", 
"dir/c"}, got) // Base case - with Objects clearCallback() - err = listR(f, "", true, ListObjects, callback, doListR, false) + err = listR(context.Background(), f, "", true, ListObjects, callback, doListR, false) require.NoError(t, err) require.Equal(t, []string{"a", "b", "dir/a", "dir/b", "dir/c"}, got) // Base case - with Dirs clearCallback() - err = listR(f, "", true, ListDirs, callback, doListR, false) + err = listR(context.Background(), f, "", true, ListDirs, callback, doListR, false) require.NoError(t, err) require.Equal(t, []string{"dir"}, got) // With filter clearCallback() - err = listR(f, "", false, ListAll, callback, doListR, false) + err = listR(context.Background(), f, "", false, ListAll, callback, doListR, false) require.NoError(t, err) require.Equal(t, []string{"b", "dir", "dir/b"}, got) // With filter - with Objects clearCallback() - err = listR(f, "", false, ListObjects, callback, doListR, false) + err = listR(context.Background(), f, "", false, ListObjects, callback, doListR, false) require.NoError(t, err) require.Equal(t, []string{"b", "dir/b"}, got) // With filter - with Dir clearCallback() - err = listR(f, "", false, ListDirs, callback, doListR, false) + err = listR(context.Background(), f, "", false, ListDirs, callback, doListR, false) require.NoError(t, err) require.Equal(t, []string{"dir"}, got) // With filter and subdir clearCallback() - err = listR(f, "dir", false, ListAll, callback, doListR, false) + err = listR(context.Background(), f, "dir", false, ListAll, callback, doListR, false) require.NoError(t, err) require.Equal(t, []string{"dir/b"}, got) @@ -777,31 +778,31 @@ func TestListR(t *testing.T) { // Base case clearCallback() - err = listR(f, "", true, ListAll, callback, doListR, true) + err = listR(context.Background(), f, "", true, ListAll, callback, doListR, true) require.NoError(t, err) require.Equal(t, []string{"a", "b", "dir/a", "dir/b", "dir/subdir/c", "dir/subdir", "dir"}, got) // With filter clearCallback() - err = listR(f, 
"", false, ListAll, callback, doListR, true) + err = listR(context.Background(), f, "", false, ListAll, callback, doListR, true) require.NoError(t, err) require.Equal(t, []string{"b", "dir/b", "dir/subdir", "dir"}, got) // With filter and subdir clearCallback() - err = listR(f, "dir", false, ListAll, callback, doListR, true) + err = listR(context.Background(), f, "dir", false, ListAll, callback, doListR, true) require.NoError(t, err) require.Equal(t, []string{"dir/b", "dir/subdir"}, got) // With filter and subdir - with Objects clearCallback() - err = listR(f, "dir", false, ListObjects, callback, doListR, true) + err = listR(context.Background(), f, "dir", false, ListObjects, callback, doListR, true) require.NoError(t, err) require.Equal(t, []string{"dir/b"}, got) // With filter and subdir - with Dirs clearCallback() - err = listR(f, "dir", false, ListDirs, callback, doListR, true) + err = listR(context.Background(), f, "dir", false, ListDirs, callback, doListR, true) require.NoError(t, err) require.Equal(t, []string{"dir/subdir"}, got) } diff --git a/fstest/fstest.go b/fstest/fstest.go index a562d2e27..7464a4642 100644 --- a/fstest/fstest.go +++ b/fstest/fstest.go @@ -5,6 +5,7 @@ package fstest import ( "bytes" + "context" "flag" "fmt" "io" @@ -127,7 +128,7 @@ func (i *Item) CheckHashes(t *testing.T, obj fs.Object) { types := obj.Fs().Hashes().Array() for _, Hash := range types { // Check attributes - sum, err := obj.Hash(Hash) + sum, err := obj.Hash(context.Background(), Hash) require.NoError(t, err) assert.True(t, hash.Equals(i.Hashes[Hash], sum), fmt.Sprintf("%s/%s: %v hash incorrect - expecting %q got %q", obj.Fs().String(), obj.Remote(), Hash, i.Hashes[Hash], sum)) } @@ -137,7 +138,7 @@ func (i *Item) CheckHashes(t *testing.T, obj fs.Object) { func (i *Item) Check(t *testing.T, obj fs.Object, precision time.Duration) { i.CheckHashes(t, obj) assert.Equal(t, i.Size, obj.Size(), fmt.Sprintf("%s: size incorrect file=%d vs obj=%d", i.Path, i.Size, obj.Size())) - 
i.CheckModTime(t, obj, obj.ModTime(), precision) + i.CheckModTime(t, obj, obj.ModTime(context.Background()), precision) } // WinPath converts a path into a windows safe path @@ -282,8 +283,9 @@ func CheckListingWithPrecision(t *testing.T, f fs.Fs, items []Item, expectedDirs wantListing1, wantListing2 := makeListingFromItems(items) gotListing := "" listingOK := false + ctx := context.Background() for i := 1; i <= retries; i++ { - objs, dirs, err = walk.GetAll(f, "", true, -1) + objs, dirs, err = walk.GetAll(ctx, f, "", true, -1) if err != nil && err != fs.ErrorDirNotFound { t.Fatalf("Error listing: %v", err) } @@ -456,23 +458,24 @@ func RandomRemote(remoteName string, subdir bool) (fs.Fs, string, func(), error) // // It logs errors rather than returning them func Purge(f fs.Fs) { + ctx := context.Background() var err error doFallbackPurge := true if doPurge := f.Features().Purge; doPurge != nil { doFallbackPurge = false fs.Debugf(f, "Purge remote") - err = doPurge() + err = doPurge(ctx) if err == fs.ErrorCantPurge { doFallbackPurge = true } } if doFallbackPurge { dirs := []string{""} - err = walk.ListR(f, "", true, -1, walk.ListAll, func(entries fs.DirEntries) error { + err = walk.ListR(ctx, f, "", true, -1, walk.ListAll, func(entries fs.DirEntries) error { var err error entries.ForObject(func(obj fs.Object) { fs.Debugf(f, "Purge object %q", obj.Remote()) - err = obj.Remove() + err = obj.Remove(ctx) if err != nil { log.Printf("purge failed to remove %q: %v", obj.Remote(), err) } @@ -486,7 +489,7 @@ func Purge(f fs.Fs) { for i := len(dirs) - 1; i >= 0; i-- { dir := dirs[i] fs.Debugf(f, "Purge dir %q", dir) - err := f.Rmdir(dir) + err := f.Rmdir(ctx, dir) if err != nil { log.Printf("purge failed to rmdir %q: %v", dir, err) } diff --git a/fstest/fstests/fstests.go b/fstest/fstests/fstests.go index 99f2ba7bd..f652a2857 100644 --- a/fstest/fstests/fstests.go +++ b/fstest/fstests/fstests.go @@ -8,6 +8,7 @@ package fstests import ( "bytes" + "context" "fmt" "io" 
"io/ioutil" @@ -119,7 +120,7 @@ func findObject(t *testing.T, f fs.Fs, Name string) fs.Object { var err error sleepTime := 1 * time.Second for i := 1; i <= *fstest.ListRetries; i++ { - obj, err = f.NewObject(Name) + obj, err = f.NewObject(context.Background(), Name) if err == nil { break } @@ -163,7 +164,7 @@ func testPut(t *testing.T, f fs.Fs, file *fstest.Item) (string, fs.Object) { file.Size = int64(buf.Len()) obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil) - obj, err = f.Put(in, obji) + obj, err = f.Put(context.Background(), in, obji) return err }) file.Hashes = uploadHash.Sums() @@ -187,7 +188,7 @@ func testPutLarge(t *testing.T, f fs.Fs, file *fstest.Item) { in := io.TeeReader(r, uploadHash) obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil) - obj, err = f.Put(in, obji) + obj, err = f.Put(context.Background(), in, obji) return err }) file.Hashes = uploadHash.Sums() @@ -199,7 +200,7 @@ func testPutLarge(t *testing.T, f fs.Fs, file *fstest.Item) { // Download the object and check it is OK downloadHash := hash.NewMultiHasher() - download, err := obj.Open() + download, err := obj.Open(context.Background()) require.NoError(t, err) n, err := io.Copy(downloadHash, download) require.NoError(t, err) @@ -208,7 +209,7 @@ func testPutLarge(t *testing.T, f fs.Fs, file *fstest.Item) { assert.Equal(t, file.Hashes, downloadHash.Sums()) // Remove the object - require.NoError(t, obj.Remove()) + require.NoError(t, obj.Remove(context.Background())) } // errorReader just returns an error on Read @@ -224,7 +225,7 @@ func (er errorReader) Read(p []byte) (n int, err error) { // read the contents of an object as a string func readObject(t *testing.T, obj fs.Object, limit int64, options ...fs.OpenOption) string { what := fmt.Sprintf("readObject(%q) limit=%d, options=%+v", obj, limit, options) - in, err := obj.Open(options...) + in, err := obj.Open(context.Background(), options...) 
require.NoError(t, err, what) var r io.Reader = in if limit >= 0 { @@ -409,12 +410,12 @@ func Run(t *testing.T, opt *Opt) { if isBucketBasedButNotRoot(remote) { t.Skip("Skipping test as non root bucket based remote") } - err := remote.Rmdir("") + err := remote.Rmdir(context.Background(), "") assert.Error(t, err, "Expecting error on Rmdir non existent") }) // Make the directory - err = remote.Mkdir("") + err = remote.Mkdir(context.Background(), "") require.NoError(t, err) fstest.CheckListing(t, remote, []fstest.Item{}) @@ -452,7 +453,7 @@ func Run(t *testing.T, opt *Opt) { // TestFsRmdirEmpty tests deleting an empty directory t.Run("FsRmdirEmpty", func(t *testing.T) { skipIfNotOk(t) - err := remote.Rmdir("") + err := remote.Rmdir(context.Background(), "") require.NoError(t, err) }) @@ -462,26 +463,26 @@ func Run(t *testing.T, opt *Opt) { t.Run("FsMkdir", func(t *testing.T) { skipIfNotOk(t) - err := remote.Mkdir("") + err := remote.Mkdir(context.Background(), "") require.NoError(t, err) fstest.CheckListing(t, remote, []fstest.Item{}) - err = remote.Mkdir("") + err = remote.Mkdir(context.Background(), "") require.NoError(t, err) // TestFsMkdirRmdirSubdir tests making and removing a sub directory t.Run("FsMkdirRmdirSubdir", func(t *testing.T) { skipIfNotOk(t) dir := "dir/subdir" - err := operations.Mkdir(remote, dir) + err := operations.Mkdir(context.Background(), remote, dir) require.NoError(t, err) fstest.CheckListingWithPrecision(t, remote, []fstest.Item{}, []string{"dir", "dir/subdir"}, fs.GetModifyWindow(remote)) - err = operations.Rmdir(remote, dir) + err = operations.Rmdir(context.Background(), remote, dir) require.NoError(t, err) fstest.CheckListingWithPrecision(t, remote, []fstest.Item{}, []string{"dir"}, fs.GetModifyWindow(remote)) - err = operations.Rmdir(remote, "dir") + err = operations.Rmdir(context.Background(), remote, "dir") require.NoError(t, err) fstest.CheckListingWithPrecision(t, remote, []fstest.Item{}, []string{}, fs.GetModifyWindow(remote)) }) 
@@ -495,7 +496,7 @@ func Run(t *testing.T, opt *Opt) { // TestFsListDirEmpty tests listing the directories from an empty directory TestFsListDirEmpty := func(t *testing.T) { skipIfNotOk(t) - objs, dirs, err := walk.GetAll(remote, "", true, 1) + objs, dirs, err := walk.GetAll(context.Background(), remote, "", true, 1) require.NoError(t, err) assert.Equal(t, []string{}, objsToNames(objs)) assert.Equal(t, []string{}, dirsToNames(dirs)) @@ -511,7 +512,7 @@ func Run(t *testing.T, opt *Opt) { // TestFsListDirNotFound tests listing the directories from an empty directory TestFsListDirNotFound := func(t *testing.T) { skipIfNotOk(t) - objs, dirs, err := walk.GetAll(remote, "does not exist", true, 1) + objs, dirs, err := walk.GetAll(context.Background(), remote, "does not exist", true, 1) if !remote.Features().CanHaveEmptyDirectories { if err != fs.ErrorDirNotFound { assert.NoError(t, err) @@ -533,11 +534,11 @@ func Run(t *testing.T, opt *Opt) { t.Run("FsNewObjectNotFound", func(t *testing.T) { skipIfNotOk(t) // Object in an existing directory - o, err := remote.NewObject("potato") + o, err := remote.NewObject(context.Background(), "potato") assert.Nil(t, o) assert.Equal(t, fs.ErrorObjectNotFound, err) // Now try an object in a non existing directory - o, err = remote.NewObject("directory/not/found/potato") + o, err = remote.NewObject(context.Background(), "directory/not/found/potato") assert.Nil(t, o) assert.Equal(t, fs.ErrorObjectNotFound, err) }) @@ -559,11 +560,11 @@ func Run(t *testing.T, opt *Opt) { in := io.MultiReader(buf, er) obji := object.NewStaticObjectInfo(file2.Path, file2.ModTime, 2*N, true, nil, nil) - _, err := remote.Put(in, obji) + _, err := remote.Put(context.Background(), in, obji) // assert.Nil(t, obj) - FIXME some remotes return the object even on nil assert.NotNil(t, err) - obj, err := remote.NewObject(file2.Path) + obj, err := remote.NewObject(context.Background(), file2.Path) assert.Nil(t, obj) assert.Equal(t, fs.ErrorObjectNotFound, err) }) @@ 
-681,7 +682,7 @@ func Run(t *testing.T, opt *Opt) { t.Skip("FS has no OpenWriterAt interface") } path := "writer-at-subdir/writer-at-file" - out, err := openWriterAt(path, -1) + out, err := openWriterAt(context.Background(), path, -1) require.NoError(t, err) var n int @@ -700,8 +701,8 @@ func Run(t *testing.T, opt *Opt) { obj := findObject(t, remote, path) assert.Equal(t, "abcdefghi", readObject(t, obj, -1), "contents of file differ") - assert.NoError(t, obj.Remove()) - assert.NoError(t, remote.Rmdir("writer-at-subdir")) + assert.NoError(t, obj.Remove(context.Background())) + assert.NoError(t, remote.Rmdir(context.Background(), "writer-at-subdir")) }) // TestFsChangeNotify tests that changes are properly @@ -717,13 +718,13 @@ func Run(t *testing.T, opt *Opt) { t.Skip("FS has no ChangeNotify interface") } - err := operations.Mkdir(remote, "dir") + err := operations.Mkdir(context.Background(), remote, "dir") require.NoError(t, err) pollInterval := make(chan time.Duration) dirChanges := map[string]struct{}{} objChanges := map[string]struct{}{} - doChangeNotify(func(x string, e fs.EntryType) { + doChangeNotify(context.Background(), func(x string, e fs.EntryType) { fs.Debugf(nil, "doChangeNotify(%q, %+v)", x, e) if strings.HasPrefix(x, file1.Path[:5]) || strings.HasPrefix(x, file2.Path[:5]) { fs.Debugf(nil, "Ignoring notify for file1 or file2: %q, %v", x, e) @@ -741,7 +742,7 @@ func Run(t *testing.T, opt *Opt) { var dirs []string for _, idx := range []int{1, 3, 2} { dir := fmt.Sprintf("dir/subdir%d", idx) - err = operations.Mkdir(remote, dir) + err = operations.Mkdir(context.Background(), remote, dir) require.NoError(t, err) dirs = append(dirs, dir) } @@ -786,11 +787,11 @@ func Run(t *testing.T, opt *Opt) { // tidy up afterwards for _, o := range objs { - assert.NoError(t, o.Remove()) + assert.NoError(t, o.Remove(context.Background())) } dirs = append(dirs, "dir") for _, dir := range dirs { - assert.NoError(t, remote.Rmdir(dir)) + assert.NoError(t, 
remote.Rmdir(context.Background(), dir)) } }) @@ -811,9 +812,9 @@ func Run(t *testing.T, opt *Opt) { list := func(dir string, expectedDirNames, expectedObjNames []string) { var objNames, dirNames []string for i := 1; i <= *fstest.ListRetries; i++ { - objs, dirs, err := walk.GetAll(remote, dir, true, 1) + objs, dirs, err := walk.GetAll(context.Background(), remote, dir, true, 1) if errors.Cause(err) == fs.ErrorDirNotFound { - objs, dirs, err = walk.GetAll(remote, fstest.WinPath(dir), true, 1) + objs, dirs, err = walk.GetAll(context.Background(), remote, fstest.WinPath(dir), true, 1) } require.NoError(t, err) objNames = objsToNames(objs) @@ -859,7 +860,7 @@ func Run(t *testing.T, opt *Opt) { // Test the files are all there with walk.ListR recursive listings t.Run("FsListR", func(t *testing.T) { skipIfNotOk(t) - objs, dirs, err := walk.GetAll(remote, "", true, -1) + objs, dirs, err := walk.GetAll(context.Background(), remote, "", true, -1) require.NoError(t, err) assert.Equal(t, []string{ "hello_ sausage", @@ -877,7 +878,7 @@ func Run(t *testing.T, opt *Opt) { // walk.ListR recursive listings on a sub dir t.Run("FsListRSubdir", func(t *testing.T) { skipIfNotOk(t) - objs, dirs, err := walk.GetAll(remote, path.Dir(path.Dir(path.Dir(path.Dir(file2.Path)))), true, -1) + objs, dirs, err := walk.GetAll(context.Background(), remote, path.Dir(path.Dir(path.Dir(path.Dir(file2.Path)))), true, -1) require.NoError(t, err) assert.Equal(t, []string{ "hello_ sausage/êé", @@ -894,7 +895,7 @@ func Run(t *testing.T, opt *Opt) { skipIfNotOk(t) rootRemote, err := fs.NewFs(remoteName) require.NoError(t, err) - _, dirs, err := walk.GetAll(rootRemote, "", true, 1) + _, dirs, err := walk.GetAll(context.Background(), rootRemote, "", true, 1) require.NoError(t, err) assert.Contains(t, dirsToNames(dirs), subRemoteLeaf, "Remote leaf not found") } @@ -916,7 +917,7 @@ func Run(t *testing.T, opt *Opt) { for i := 0; i < 2; i++ { dir, _ := path.Split(fileName) dir = dir[:len(dir)-1] - objs, dirs, err 
= walk.GetAll(remote, dir, true, -1) + objs, dirs, err = walk.GetAll(context.Background(), remote, dir, true, -1) if err != fs.ErrorDirNotFound { break } @@ -938,7 +939,7 @@ func Run(t *testing.T, opt *Opt) { // TestFsListLevel2 tests List works for 2 levels TestFsListLevel2 := func(t *testing.T) { skipIfNotOk(t) - objs, dirs, err := walk.GetAll(remote, "", true, 2) + objs, dirs, err := walk.GetAll(context.Background(), remote, "", true, 2) if err == fs.ErrorLevelNotSupported { return } @@ -977,7 +978,7 @@ func Run(t *testing.T, opt *Opt) { t.Run("FsNewObjectDir", func(t *testing.T) { skipIfNotOk(t) dir := path.Dir(file2.Path) - obj, err := remote.NewObject(dir) + obj, err := remote.NewObject(context.Background(), dir) assert.Nil(t, obj) assert.NotNil(t, err) }) @@ -998,7 +999,7 @@ func Run(t *testing.T, opt *Opt) { // do the copy src := findObject(t, remote, file2.Path) - dst, err := doCopy(src, file2Copy.Path) + dst, err := doCopy(context.Background(), src, file2Copy.Path) if err == fs.ErrorCantCopy { t.Skip("FS can't copy") } @@ -1011,7 +1012,7 @@ func Run(t *testing.T, opt *Opt) { assert.Equal(t, file2Copy.Path, dst.Remote()) // Delete copy - err = dst.Remove() + err = dst.Remove(context.Background()) require.NoError(t, err) }) @@ -1038,7 +1039,7 @@ func Run(t *testing.T, opt *Opt) { file2Move.Path = "other.txt" file2Move.WinPath = "" src := findObject(t, remote, file2.Path) - dst, err := doMove(src, file2Move.Path) + dst, err := doMove(context.Background(), src, file2Move.Path) if err == fs.ErrorCantMove { t.Skip("FS can't move") } @@ -1053,7 +1054,7 @@ func Run(t *testing.T, opt *Opt) { // Check conflict on "rename, then move" file1Move.Path = "moveTest/other.txt" src = findObject(t, remote, file1.Path) - _, err = doMove(src, file1Move.Path) + _, err = doMove(context.Background(), src, file1Move.Path) require.NoError(t, err) fstest.CheckListing(t, remote, []fstest.Item{file1Move, file2Move}) // 1: moveTest/other.txt @@ -1061,21 +1062,21 @@ func Run(t 
*testing.T, opt *Opt) { // Check conflict on "move, then rename" src = findObject(t, remote, file1Move.Path) - _, err = doMove(src, file1.Path) + _, err = doMove(context.Background(), src, file1.Path) require.NoError(t, err) fstest.CheckListing(t, remote, []fstest.Item{file1, file2Move}) // 1: file name.txt // 2: other.txt src = findObject(t, remote, file2Move.Path) - _, err = doMove(src, file2.Path) + _, err = doMove(context.Background(), src, file2.Path) require.NoError(t, err) fstest.CheckListing(t, remote, []fstest.Item{file1, file2}) // 1: file name.txt // 2: hello sausage?/../z.txt // Tidy up moveTest directory - require.NoError(t, remote.Rmdir("moveTest")) + require.NoError(t, remote.Rmdir(context.Background(), "moveTest")) }) // Move src to this remote using server side move operations. @@ -1099,7 +1100,7 @@ func Run(t *testing.T, opt *Opt) { } // Check it can't move onto itself - err := doDirMove(remote, "", "") + err := doDirMove(context.Background(), remote, "", "") require.Equal(t, fs.ErrorDirExists, err) // new remote @@ -1109,12 +1110,12 @@ func Run(t *testing.T, opt *Opt) { const newName = "new_name/sub_new_name" // try the move - err = newRemote.Features().DirMove(remote, "", newName) + err = newRemote.Features().DirMove(context.Background(), remote, "", newName) require.NoError(t, err) // check remotes // remote should not exist here - _, err = remote.List("") + _, err = remote.List(context.Background(), "") assert.Equal(t, fs.ErrorDirNotFound, errors.Cause(err)) //fstest.CheckListingWithPrecision(t, remote, []fstest.Item{}, []string{}, remote.Precision()) file1Copy := file1 @@ -1132,7 +1133,7 @@ func Run(t *testing.T, opt *Opt) { }, newRemote.Precision()) // move it back - err = doDirMove(newRemote, newName, "") + err = doDirMove(context.Background(), newRemote, newName, "") require.NoError(t, err) // check remotes @@ -1153,7 +1154,7 @@ func Run(t *testing.T, opt *Opt) { if isBucketBasedButNotRoot(remote) { t.Skip("Skipping test as non root bucket 
based remote") } - err := remote.Rmdir("") + err := remote.Rmdir(context.Background(), "") require.Error(t, err, "Expecting error on RMdir on non empty remote") }) @@ -1217,7 +1218,7 @@ func Run(t *testing.T, opt *Opt) { TestObjectModTime := func(t *testing.T) { skipIfNotOk(t) obj := findObject(t, remote, file1.Path) - file1.CheckModTime(t, obj, obj.ModTime(), remote.Precision()) + file1.CheckModTime(t, obj, obj.ModTime(context.Background()), remote.Precision()) } t.Run("ObjectModTime", TestObjectModTime) @@ -1229,7 +1230,7 @@ func Run(t *testing.T, opt *Opt) { if !ok { t.Skip("MimeType method not supported") } - mimeType := do.MimeType() + mimeType := do.MimeType(context.Background()) if strings.ContainsRune(mimeType, ';') { assert.Equal(t, "text/plain; charset=utf-8", mimeType) } else { @@ -1242,14 +1243,14 @@ func Run(t *testing.T, opt *Opt) { skipIfNotOk(t) newModTime := fstest.Time("2011-12-13T14:15:16.999999999Z") obj := findObject(t, remote, file1.Path) - err := obj.SetModTime(newModTime) + err := obj.SetModTime(context.Background(), newModTime) if err == fs.ErrorCantSetModTime || err == fs.ErrorCantSetModTimeWithoutDelete { t.Log(err) return } require.NoError(t, err) file1.ModTime = newModTime - file1.CheckModTime(t, obj, obj.ModTime(), remote.Precision()) + file1.CheckModTime(t, obj, obj.ModTime(context.Background()), remote.Precision()) // And make a new object and read it from there too TestObjectModTime(t) }) @@ -1316,7 +1317,7 @@ func Run(t *testing.T, opt *Opt) { file1.Size = int64(buf.Len()) obj := findObject(t, remote, file1.Path) obji := object.NewStaticObjectInfo(file1.Path, file1.ModTime, int64(len(contents)), true, nil, obj.Fs()) - err := obj.Update(in, obji) + err := obj.Update(context.Background(), in, obji) require.NoError(t, err) file1.Hashes = hash.Sums() @@ -1373,34 +1374,34 @@ func Run(t *testing.T, opt *Opt) { } // if object not found - link, err := doPublicLink(file1.Path + "_does_not_exist") + link, err := 
doPublicLink(context.Background(), file1.Path+"_does_not_exist") require.Error(t, err, "Expected to get error when file doesn't exist") require.Equal(t, "", link, "Expected link to be empty on error") // sharing file for the first time - link1, err := doPublicLink(file1.Path) + link1, err := doPublicLink(context.Background(), file1.Path) require.NoError(t, err) require.NotEqual(t, "", link1, "Link should not be empty") - link2, err := doPublicLink(file2.Path) + link2, err := doPublicLink(context.Background(), file2.Path) require.NoError(t, err) require.NotEqual(t, "", link2, "Link should not be empty") require.NotEqual(t, link1, link2, "Links to different files should differ") // sharing file for the 2nd time - link1, err = doPublicLink(file1.Path) + link1, err = doPublicLink(context.Background(), file1.Path) require.NoError(t, err) require.NotEqual(t, "", link1, "Link should not be empty") // sharing directory for the first time path := path.Dir(file2.Path) - link3, err := doPublicLink(path) + link3, err := doPublicLink(context.Background(), path) require.NoError(t, err) require.NotEqual(t, "", link3, "Link should not be empty") // sharing directory for the second time - link3, err = doPublicLink(path) + link3, err = doPublicLink(context.Background(), path) require.NoError(t, err) require.NotEqual(t, "", link3, "Link should not be empty") @@ -1411,10 +1412,10 @@ func Run(t *testing.T, opt *Opt) { // ensure sub remote isn't empty buf := bytes.NewBufferString("somecontent") obji := object.NewStaticObjectInfo("somefile", time.Now(), int64(buf.Len()), true, nil, nil) - _, err = subRemote.Put(buf, obji) + _, err = subRemote.Put(context.Background(), buf, obji) require.NoError(t, err) - link4, err := subRemote.Features().PublicLink("") + link4, err := subRemote.Features().PublicLink(context.Background(), "") require.NoError(t, err, "Sharing root in a sub-remote should work") require.NotEqual(t, "", link4, "Link should not be empty") }) @@ -1463,7 +1464,7 @@ func Run(t 
*testing.T, opt *Opt) { t.Run("ObjectRemove", func(t *testing.T) { skipIfNotOk(t) obj := findObject(t, remote, file1.Path) - err := obj.Remove() + err := obj.Remove(context.Background()) require.NoError(t, err) // check listing without modtime as TestPublicLink may change the modtime fstest.CheckListingWithPrecision(t, remote, []fstest.Item{file2}, nil, fs.ModTimeNotSupported) @@ -1496,7 +1497,7 @@ func Run(t *testing.T, opt *Opt) { file.Size = -1 obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil) - obj, err = remote.Features().PutStream(in, obji) + obj, err = remote.Features().PutStream(context.Background(), in, obji) return err }) file.Hashes = uploadHash.Sums() @@ -1518,7 +1519,7 @@ func Run(t *testing.T, opt *Opt) { } // Can't really check the output much! - usage, err := doAbout() + usage, err := doAbout(context.Background()) require.NoError(t, err) require.NotNil(t, usage) assert.NotEqual(t, int64(0), usage.Total) @@ -1550,9 +1551,9 @@ func Run(t *testing.T, opt *Opt) { in := bytes.NewBufferString(contents) obji := object.NewStaticObjectInfo("unknown-size-put.txt", fstest.Time("2002-02-03T04:05:06.499999999Z"), -1, true, nil, nil) - obj, err := remote.Put(in, obji) + obj, err := remote.Put(context.Background(), in, obji) if err == nil { - require.NoError(t, obj.Remove(), "successfully uploaded unknown-sized file but failed to remove") + require.NoError(t, obj.Remove(context.Background()), "successfully uploaded unknown-sized file but failed to remove") } // if err != nil: it's okay as long as no panic }) @@ -1574,9 +1575,9 @@ func Run(t *testing.T, opt *Opt) { obj := findObject(t, remote, unknownSizeUpdateFile.Path) obji := object.NewStaticObjectInfo(unknownSizeUpdateFile.Path, unknownSizeUpdateFile.ModTime, -1, true, nil, obj.Fs()) - err := obj.Update(in, obji) + err := obj.Update(context.Background(), in, obji) if err == nil { - require.NoError(t, obj.Remove(), "successfully updated object with unknown-sized source but 
failed to remove") + require.NoError(t, obj.Remove(context.Background()), "successfully updated object with unknown-sized source but failed to remove") } // if err != nil: it's okay as long as no panic }) @@ -1598,21 +1599,21 @@ func Run(t *testing.T, opt *Opt) { colonIndex := strings.IndexRune(deepRemoteName, ':') firstSlashIndex := strings.IndexRune(deepRemoteName, '/') firstDir := deepRemoteName[colonIndex+1 : firstSlashIndex] - _, err = deepRemote.NewObject(firstDir) + _, err = deepRemote.NewObject(context.Background(), firstDir) require.Equal(t, fs.ErrorObjectNotFound, err) // If err is not fs.ErrorObjectNotFound, it means the backend is // somehow confused about root and absolute root. }) // Purge the folder - err = operations.Purge(remote, "") + err = operations.Purge(context.Background(), remote, "") require.NoError(t, err) purged = true fstest.CheckListing(t, remote, []fstest.Item{}) // Check purging again if not bucket based if !isBucketBasedButNotRoot(remote) { - err = operations.Purge(remote, "") + err = operations.Purge(context.Background(), remote, "") assert.Error(t, err, "Expecting error after on second purge") } @@ -1620,7 +1621,7 @@ func Run(t *testing.T, opt *Opt) { // Check directory is purged if !purged { - _ = operations.Purge(remote, "") + _ = operations.Purge(context.Background(), remote, "") } // Remove the local directory so we don't clutter up /tmp diff --git a/fstest/mockfs/mockfs.go b/fstest/mockfs/mockfs.go index e32fe9566..1d181a2a6 100644 --- a/fstest/mockfs/mockfs.go +++ b/fstest/mockfs/mockfs.go @@ -1,6 +1,7 @@ package mockfs import ( + "context" "errors" "fmt" "io" @@ -69,13 +70,13 @@ func (f *Fs) Features() *fs.Features { // // This should return ErrDirNotFound if the directory isn't // found. -func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { +func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { return nil, nil } // NewObject finds the Object at remote. 
If it can't be found // it returns the error ErrorObjectNotFound. -func (f *Fs) NewObject(remote string) (fs.Object, error) { +func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { return nil, fs.ErrorObjectNotFound } @@ -84,21 +85,21 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) { // May create the object even if it returns an error - if so // will return the object and the error, otherwise will return // nil and the error -func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { return nil, ErrNotImplemented } // Mkdir makes the directory (container, bucket) // // Shouldn't return an error if it already exists -func (f *Fs) Mkdir(dir string) error { +func (f *Fs) Mkdir(ctx context.Context, dir string) error { return ErrNotImplemented } // Rmdir removes the directory (container, bucket) if empty // // Return an error if it doesn't exist or isn't empty -func (f *Fs) Rmdir(dir string) error { +func (f *Fs) Rmdir(ctx context.Context, dir string) error { return ErrNotImplemented } diff --git a/fstest/mockobject/mockobject.go b/fstest/mockobject/mockobject.go index afeaad8ab..0a58d72f0 100644 --- a/fstest/mockobject/mockobject.go +++ b/fstest/mockobject/mockobject.go @@ -3,6 +3,7 @@ package mockobject import ( "bytes" + "context" "errors" "fmt" "io" @@ -39,13 +40,13 @@ func (o Object) Remote() string { // Hash returns the selected checksum of the file // If no checksum is available it returns "" -func (o Object) Hash(hash.Type) (string, error) { +func (o Object) Hash(ctx context.Context, t hash.Type) (string, error) { return "", errNotImpl } // ModTime returns the modification date of the file // It should return a best guess if one isn't available -func (o Object) ModTime() (t time.Time) { +func (o Object) ModTime(ctx context.Context) (t time.Time) { return t } @@ -58,22 
+59,22 @@ func (o Object) Storable() bool { } // SetModTime sets the metadata on the object to set the modification date -func (o Object) SetModTime(time.Time) error { +func (o Object) SetModTime(ctx context.Context, t time.Time) error { return errNotImpl } // Open opens the file for read. Call Close() on the returned io.ReadCloser -func (o Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) { +func (o Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) { return nil, errNotImpl } // Update in to the object with the modTime given of the given size -func (o Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { +func (o Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { return errNotImpl } // Remove this object -func (o Object) Remove() error { +func (o Object) Remove(ctx context.Context) error { return errNotImpl } @@ -107,7 +108,7 @@ func (o Object) WithContent(content []byte, mode SeekMode) fs.Object { } } -func (o *contentMockObject) Open(options ...fs.OpenOption) (io.ReadCloser, error) { +func (o *contentMockObject) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) { var offset, limit int64 = 0, -1 for _, option := range options { switch x := option.(type) { diff --git a/fstest/run.go b/fstest/run.go index 5eb8c0335..ea521b8db 100644 --- a/fstest/run.go +++ b/fstest/run.go @@ -26,6 +26,7 @@ package fstest import ( "bytes" + "context" "flag" "fmt" "io/ioutil" @@ -130,6 +131,7 @@ func retry(t *testing.T, what string, f func() error) { // // Finalise() will tidy them away when done. 
func newRunIndividual(t *testing.T, individual bool) *Run { + ctx := context.Background() var r *Run if individual { r = newRun() @@ -139,11 +141,11 @@ func newRunIndividual(t *testing.T, individual bool) *Run { *r = *oneRun r.cleanRemote = func() { var toDelete []string - err := walk.ListR(r.Fremote, "", true, -1, walk.ListAll, func(entries fs.DirEntries) error { + err := walk.ListR(ctx, r.Fremote, "", true, -1, walk.ListAll, func(entries fs.DirEntries) error { for _, entry := range entries { switch x := entry.(type) { case fs.Object: - retry(t, fmt.Sprintf("removing file %q", x.Remote()), x.Remove) + retry(t, fmt.Sprintf("removing file %q", x.Remote()), func() error { return x.Remove(ctx) }) case fs.Directory: toDelete = append(toDelete, x.Remote()) } @@ -158,7 +160,7 @@ func newRunIndividual(t *testing.T, individual bool) *Run { for i := len(toDelete) - 1; i >= 0; i-- { dir := toDelete[i] retry(t, fmt.Sprintf("removing dir %q", dir), func() error { - return r.Fremote.Rmdir(dir) + return r.Fremote.Rmdir(ctx, dir) }) } // Check remote is empty @@ -222,8 +224,8 @@ func (r *Run) WriteFile(filePath, content string, t time.Time) Item { } // ForceMkdir creates the remote -func (r *Run) ForceMkdir(f fs.Fs) { - err := f.Mkdir("") +func (r *Run) ForceMkdir(ctx context.Context, f fs.Fs) { + err := f.Mkdir(ctx, "") if err != nil { r.Fatalf("Failed to mkdir %q: %v", f, err) } @@ -231,14 +233,14 @@ func (r *Run) ForceMkdir(f fs.Fs) { } // Mkdir creates the remote if it hasn't been created already -func (r *Run) Mkdir(f fs.Fs) { +func (r *Run) Mkdir(ctx context.Context, f fs.Fs) { if !r.mkdir[f.String()] { - r.ForceMkdir(f) + r.ForceMkdir(ctx, f) } } // WriteObjectTo writes an object to the fs, remote passed in -func (r *Run) WriteObjectTo(f fs.Fs, remote, content string, modTime time.Time, useUnchecked bool) Item { +func (r *Run) WriteObjectTo(ctx context.Context, f fs.Fs, remote, content string, modTime time.Time, useUnchecked bool) Item { put := f.Put if useUnchecked { put 
= f.Features().PutUnchecked @@ -246,12 +248,12 @@ func (r *Run) WriteObjectTo(f fs.Fs, remote, content string, modTime time.Time, r.Fatalf("Fs doesn't support PutUnchecked") } } - r.Mkdir(f) + r.Mkdir(ctx, f) const maxTries = 10 for tries := 1; ; tries++ { in := bytes.NewBufferString(content) objinfo := object.NewStaticObjectInfo(remote, modTime, int64(len(content)), true, nil, nil) - _, err := put(in, objinfo) + _, err := put(ctx, in, objinfo) if err == nil { break } @@ -267,19 +269,19 @@ func (r *Run) WriteObjectTo(f fs.Fs, remote, content string, modTime time.Time, } // WriteObject writes an object to the remote -func (r *Run) WriteObject(remote, content string, modTime time.Time) Item { - return r.WriteObjectTo(r.Fremote, remote, content, modTime, false) +func (r *Run) WriteObject(ctx context.Context, remote, content string, modTime time.Time) Item { + return r.WriteObjectTo(ctx, r.Fremote, remote, content, modTime, false) } // WriteUncheckedObject writes an object to the remote not checking for duplicates -func (r *Run) WriteUncheckedObject(remote, content string, modTime time.Time) Item { - return r.WriteObjectTo(r.Fremote, remote, content, modTime, true) +func (r *Run) WriteUncheckedObject(ctx context.Context, remote, content string, modTime time.Time) Item { + return r.WriteObjectTo(ctx, r.Fremote, remote, content, modTime, true) } // WriteBoth calls WriteObject and WriteFile with the same arguments -func (r *Run) WriteBoth(remote, content string, modTime time.Time) Item { +func (r *Run) WriteBoth(ctx context.Context, remote, content string, modTime time.Time) Item { r.WriteFile(remote, content, modTime) - return r.WriteObject(remote, content, modTime) + return r.WriteObject(ctx, remote, content, modTime) } // CheckWithDuplicates does a test but allows duplicates @@ -293,7 +295,7 @@ func (r *Run) CheckWithDuplicates(t *testing.T, items ...Item) { sort.Strings(want) // do the listing - objs, _, err := walk.GetAll(r.Fremote, "", true, -1) + objs, _, err := 
walk.GetAll(context.Background(), r.Fremote, "", true, -1) if err != nil && err != fs.ErrorDirNotFound { t.Fatalf("Error listing: %v", err) } diff --git a/fstest/test_all/clean.go b/fstest/test_all/clean.go index 2b0f1b9b4..2e61b96a7 100644 --- a/fstest/test_all/clean.go +++ b/fstest/test_all/clean.go @@ -5,6 +5,7 @@ package main import ( + "context" "log" "regexp" @@ -25,7 +26,7 @@ func cleanFs(remote string) error { if err != nil { return err } - entries, err := list.DirSorted(f, true, "") + entries, err := list.DirSorted(context.Background(), f, true, "") if err != nil { return err } @@ -46,7 +47,7 @@ func cleanFs(remote string) error { fs.Errorf(fullPath, "%v", err) return nil } - err = operations.Purge(dir, "") + err = operations.Purge(context.Background(), dir, "") if err != nil { err = errors.Wrap(err, "Purge failed") lastErr = err diff --git a/lib/dircache/dircache.go b/lib/dircache/dircache.go index 62564bc2b..25e82b1e0 100644 --- a/lib/dircache/dircache.go +++ b/lib/dircache/dircache.go @@ -4,6 +4,7 @@ package dircache // _methods are called without the lock import ( + "context" "log" "strings" "sync" @@ -28,8 +29,8 @@ type DirCache struct { // DirCacher describes an interface for doing the low level directory work type DirCacher interface { - FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) - CreateDir(pathID, leaf string) (newID string, err error) + FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) + CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) } // New makes a DirCache @@ -135,10 +136,10 @@ func SplitPath(path string) (directory, leaf string) { // Look in the cache for the path, if found return the pathID // If not found strip the last path off the path and recurse // Now have a parent directory id, so look in the parent for self and return it -func (dc *DirCache) FindDir(path string, create bool) (pathID string, err error) { +func (dc *DirCache) 
FindDir(ctx context.Context, path string, create bool) (pathID string, err error) { dc.mu.Lock() defer dc.mu.Unlock() - return dc._findDir(path, create) + return dc._findDir(ctx, path, create) } // Look for the root and in the cache - safe to call without the mu @@ -161,7 +162,7 @@ func (dc *DirCache) _findDirInCache(path string) string { } // Unlocked findDir - must have mu -func (dc *DirCache) _findDir(path string, create bool) (pathID string, err error) { +func (dc *DirCache) _findDir(ctx context.Context, path string, create bool) (pathID string, err error) { pathID = dc._findDirInCache(path) if pathID != "" { return pathID, nil @@ -171,14 +172,14 @@ func (dc *DirCache) _findDir(path string, create bool) (pathID string, err error directory, leaf := SplitPath(path) // Recurse and find pathID for parent directory - parentPathID, err := dc._findDir(directory, create) + parentPathID, err := dc._findDir(ctx, directory, create) if err != nil { return "", err } // Find the leaf in parentPathID - pathID, found, err := dc.fs.FindLeaf(parentPathID, leaf) + pathID, found, err := dc.fs.FindLeaf(ctx, parentPathID, leaf) if err != nil { return "", err } @@ -186,7 +187,7 @@ func (dc *DirCache) _findDir(path string, create bool) (pathID string, err error // If not found create the directory if required or return an error if !found { if create { - pathID, err = dc.fs.CreateDir(parentPathID, leaf) + pathID, err = dc.fs.CreateDir(ctx, parentPathID, leaf) if err != nil { return "", errors.Wrap(err, "failed to make directory") } @@ -207,7 +208,7 @@ func (dc *DirCache) _findDir(path string, create bool) (pathID string, err error // Do not call FindPath with the root directory - it will return an error // // If create is set parent directories will be created if they don't exist -func (dc *DirCache) FindPath(path string, create bool) (leaf, directoryID string, err error) { +func (dc *DirCache) FindPath(ctx context.Context, path string, create bool) (leaf, directoryID string, err 
error) { if path == "" { err = errors.New("internal error: can't call FindPath with root directory") return @@ -215,7 +216,7 @@ func (dc *DirCache) FindPath(path string, create bool) (leaf, directoryID string dc.mu.Lock() defer dc.mu.Unlock() directory, leaf := SplitPath(path) - directoryID, err = dc._findDir(directory, create) + directoryID, err = dc._findDir(ctx, directory, create) return } @@ -224,13 +225,13 @@ func (dc *DirCache) FindPath(path string, create bool) (leaf, directoryID string // Resets the root directory // // If create is set it will make the directory if not found -func (dc *DirCache) FindRoot(create bool) error { +func (dc *DirCache) FindRoot(ctx context.Context, create bool) error { dc.mu.Lock() defer dc.mu.Unlock() if dc.foundRoot { return nil } - rootID, err := dc._findDir(dc.root, create) + rootID, err := dc._findDir(ctx, dc.root, create) if err != nil { return err } @@ -252,12 +253,12 @@ func (dc *DirCache) FindRoot(create bool) error { // FindRootAndPath finds the root first if not found then finds leaf and directoryID from a path // // If create is set parent directories will be created if they don't exist -func (dc *DirCache) FindRootAndPath(path string, create bool) (leaf, directoryID string, err error) { - err = dc.FindRoot(create) +func (dc *DirCache) FindRootAndPath(ctx context.Context, path string, create bool) (leaf, directoryID string, err error) { + err = dc.FindRoot(ctx, create) if err != nil { return } - return dc.FindPath(path, create) + return dc.FindPath(ctx, path, create) } // FoundRoot returns whether the root directory has been found yet diff --git a/vfs/dir.go b/vfs/dir.go index 5aa450c4f..fda29c2a6 100644 --- a/vfs/dir.go +++ b/vfs/dir.go @@ -1,6 +1,7 @@ package vfs import ( + "context" "os" "path" "sort" @@ -36,7 +37,7 @@ func newDir(vfs *VFS, f fs.Fs, parent *Dir, fsDir fs.Directory) *Dir { parent: parent, entry: fsDir, path: fsDir.Remote(), - modTime: fsDir.ModTime(), + modTime: fsDir.ModTime(context.TODO()), inode: 
newInode(), items: make(map[string]Node), } @@ -177,7 +178,7 @@ func (d *Dir) rename(newParent *Dir, fsDir fs.Directory) { d.parent = newParent d.entry = fsDir d.path = fsDir.Remote() - d.modTime = fsDir.ModTime() + d.modTime = fsDir.ModTime(context.TODO()) d.read = time.Time{} } @@ -207,7 +208,7 @@ func (d *Dir) _readDir() error { } else { return nil } - entries, err := list.DirSorted(d.f, false, d.path) + entries, err := list.DirSorted(context.TODO(), d.f, false, d.path) if err == fs.ErrorDirNotFound { // We treat directory not found as empty because we // create directories on the fly @@ -294,7 +295,7 @@ func (d *Dir) readDirTree() error { d.mu.Unlock() when := time.Now() fs.Debugf(path, "Reading directory tree") - dt, err := walk.NewDirTree(f, path, false, -1) + dt, err := walk.NewDirTree(context.TODO(), f, path, false, -1) if err != nil { return err } @@ -477,7 +478,7 @@ func (d *Dir) Mkdir(name string) (*Dir, error) { return nil, err } // fs.Debugf(path, "Dir.Mkdir") - err = d.f.Mkdir(path) + err = d.f.Mkdir(context.TODO(), path) if err != nil { fs.Errorf(d, "Dir.Mkdir failed to create directory: %v", err) return nil, err @@ -505,7 +506,7 @@ func (d *Dir) Remove() error { return ENOTEMPTY } // remove directory - err = d.f.Rmdir(d.path) + err = d.f.Rmdir(context.TODO(), d.path) if err != nil { fs.Errorf(d, "Dir.Remove failed to remove directory: %v", err) return err @@ -575,7 +576,7 @@ func (d *Dir) Rename(oldName, newName string, destDir *Dir) error { switch x := oldNode.DirEntry().(type) { case nil: if oldFile, ok := oldNode.(*File); ok { - if err = oldFile.rename(destDir, newName); err != nil { + if err = oldFile.rename(context.TODO(), destDir, newName); err != nil { fs.Errorf(oldPath, "Dir.Rename error: %v", err) return err } @@ -585,7 +586,7 @@ func (d *Dir) Rename(oldName, newName string, destDir *Dir) error { } case fs.Object: if oldFile, ok := oldNode.(*File); ok { - if err = oldFile.rename(destDir, newName); err != nil { + if err = 
oldFile.rename(context.TODO(), destDir, newName); err != nil { fs.Errorf(oldPath, "Dir.Rename error: %v", err) return err } @@ -603,12 +604,12 @@ func (d *Dir) Rename(oldName, newName string, destDir *Dir) error { } srcRemote := x.Remote() dstRemote := newPath - err = operations.DirMove(d.f, srcRemote, dstRemote) + err = operations.DirMove(context.TODO(), d.f, srcRemote, dstRemote) if err != nil { fs.Errorf(oldPath, "Dir.Rename error: %v", err) return err } - newDir := fs.NewDirCopy(x).SetRemote(newPath) + newDir := fs.NewDirCopy(context.TODO(), x).SetRemote(newPath) // Update the node with the new details if oldNode != nil { if oldDir, ok := oldNode.(*Dir); ok { diff --git a/vfs/dir_handle_test.go b/vfs/dir_handle_test.go index 4bbe635e8..df5f3fc3b 100644 --- a/vfs/dir_handle_test.go +++ b/vfs/dir_handle_test.go @@ -1,6 +1,7 @@ package vfs import ( + "context" "io" "os" "testing" @@ -43,9 +44,9 @@ func TestDirHandleReaddir(t *testing.T) { defer r.Finalise() vfs := New(r.Fremote, nil) - file1 := r.WriteObject("dir/file1", "file1 contents", t1) - file2 := r.WriteObject("dir/file2", "file2- contents", t2) - file3 := r.WriteObject("dir/subdir/file3", "file3-- contents", t3) + file1 := r.WriteObject(context.Background(), "dir/file1", "file1 contents", t1) + file2 := r.WriteObject(context.Background(), "dir/file2", "file2- contents", t2) + file3 := r.WriteObject(context.Background(), "dir/subdir/file3", "file3-- contents", t3) fstest.CheckItems(t, r.Fremote, file1, file2, file3) node, err := vfs.Stat("dir") diff --git a/vfs/dir_test.go b/vfs/dir_test.go index 9165d21e8..d61bdf172 100644 --- a/vfs/dir_test.go +++ b/vfs/dir_test.go @@ -1,6 +1,7 @@ package vfs import ( + "context" "fmt" "os" "sort" @@ -16,7 +17,7 @@ import ( func dirCreate(t *testing.T, r *fstest.Run) (*VFS, *Dir, fstest.Item) { vfs := New(r.Fremote, nil) - file1 := r.WriteObject("dir/file1", "file1 contents", t1) + file1 := r.WriteObject(context.Background(), "dir/file1", "file1 contents", t1) 
fstest.CheckItems(t, r.Fremote, file1) node, err := vfs.Stat("dir") @@ -142,7 +143,7 @@ func TestDirWalk(t *testing.T) { defer r.Finalise() vfs, _, file1 := dirCreate(t, r) - file2 := r.WriteObject("fil/a/b/c", "super long file", t1) + file2 := r.WriteObject(context.Background(), "fil/a/b/c", "super long file", t1) fstest.CheckItems(t, r.Fremote, file1, file2) root, err := vfs.Root() @@ -257,9 +258,9 @@ func TestDirReadDirAll(t *testing.T) { defer r.Finalise() vfs := New(r.Fremote, nil) - file1 := r.WriteObject("dir/file1", "file1 contents", t1) - file2 := r.WriteObject("dir/file2", "file2- contents", t2) - file3 := r.WriteObject("dir/subdir/file3", "file3-- contents", t3) + file1 := r.WriteObject(context.Background(), "dir/file1", "file1 contents", t1) + file2 := r.WriteObject(context.Background(), "dir/file2", "file2- contents", t2) + file3 := r.WriteObject(context.Background(), "dir/subdir/file3", "file3-- contents", t3) fstest.CheckItems(t, r.Fremote, file1, file2, file3) node, err := vfs.Stat("dir") @@ -475,7 +476,7 @@ func TestDirRename(t *testing.T) { } vfs, dir, file1 := dirCreate(t, r) - file3 := r.WriteObject("dir/file3", "file3 contents!", t1) + file3 := r.WriteObject(context.Background(), "dir/file3", "file3 contents!", t1) fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{file1, file3}, []string{"dir"}, r.Fremote.Precision()) root, err := vfs.Root() diff --git a/vfs/file.go b/vfs/file.go index a20c6d525..90034601b 100644 --- a/vfs/file.go +++ b/vfs/file.go @@ -1,6 +1,7 @@ package vfs import ( + "context" "os" "path" "sync" @@ -19,17 +20,17 @@ type File struct { size int64 // size of file - read and written with atomic int64 - must be 64 bit aligned d *Dir // parent directory - read only - mu sync.Mutex // protects the following - o fs.Object // NB o may be nil if file is being written - leaf string // leaf name of the object - rwOpenCount int // number of open files on this handle - writers []Handle // writers for this file - nwriters int32 
// len(writers) which is read/updated with atomic - readWriters int // how many RWFileHandle are open for writing - readWriterClosing bool // is a RWFileHandle currently cosing? - modified bool // has the cache file be modified by a RWFileHandle? - pendingModTime time.Time // will be applied once o becomes available, i.e. after file was written - pendingRenameFun func() error // will be run/renamed after all writers close + mu sync.Mutex // protects the following + o fs.Object // NB o may be nil if file is being written + leaf string // leaf name of the object + rwOpenCount int // number of open files on this handle + writers []Handle // writers for this file + nwriters int32 // len(writers) which is read/updated with atomic + readWriters int // how many RWFileHandle are open for writing + readWriterClosing bool // is a RWFileHandle currently cosing? + modified bool // has the cache file be modified by a RWFileHandle? + pendingModTime time.Time // will be applied once o becomes available, i.e. after file was written + pendingRenameFun func(ctx context.Context) error // will be run/renamed after all writers close muRW sync.Mutex // synchonize RWFileHandle.openPending(), RWFileHandle.close() and File.Remove } @@ -100,7 +101,7 @@ func (f *File) applyPendingRename() { return } fs.Debugf(f.o, "Running delayed rename now") - if err := fun(); err != nil { + if err := fun(context.TODO()); err != nil { fs.Errorf(f.Path(), "delayed File.Rename error: %v", err) } } @@ -108,17 +109,17 @@ func (f *File) applyPendingRename() { // rename attempts to immediately rename a file if there are no open writers. // Otherwise it will queue the rename operation on the remote until no writers // remain. 
-func (f *File) rename(destDir *Dir, newName string) error { +func (f *File) rename(ctx context.Context, destDir *Dir, newName string) error { if features := f.d.f.Features(); features.Move == nil && features.Copy == nil { err := errors.Errorf("Fs %q can't rename files (no server side Move or Copy)", f.d.f) fs.Errorf(f.Path(), "Dir.Rename error: %v", err) return err } - renameCall := func() error { + renameCall := func(ctx context.Context) error { newPath := path.Join(destDir.path, newName) - dstOverwritten, _ := f.d.f.NewObject(newPath) - newObject, err := operations.Move(f.d.f, dstOverwritten, newPath, f.o) + dstOverwritten, _ := f.d.f.NewObject(ctx, newPath) + newObject, err := operations.Move(ctx, f.d.f, dstOverwritten, newPath, f.o) if err != nil { fs.Errorf(f.Path(), "File.Rename error: %v", err) return err @@ -151,7 +152,7 @@ func (f *File) rename(destDir *Dir, newName string) error { return nil } - return renameCall() + return renameCall(ctx) } // addWriter adds a write handle to the file @@ -252,7 +253,7 @@ func (f *File) ModTime() (modTime time.Time) { return f.pendingModTime } } else { - return f.o.ModTime() + return f.o.ModTime(context.TODO()) } } @@ -310,7 +311,7 @@ func (f *File) applyPendingModTime() error { return errors.New("Cannot apply ModTime, file object is not available") } - err := f.o.SetModTime(f.pendingModTime) + err := f.o.SetModTime(context.TODO(), f.pendingModTime) switch err { case nil: fs.Debugf(f.o, "File.applyPendingModTime OK") @@ -453,7 +454,7 @@ func (f *File) Remove() error { f.muRW.Lock() // muRW must be locked before mu to avoid f.mu.Lock() // deadlock in RWFileHandle.openPending and .close if f.o != nil { - err := f.o.Remove() + err := f.o.Remove(context.TODO()) if err != nil { fs.Errorf(f, "File.Remove file error: %v", err) f.mu.Unlock() diff --git a/vfs/file_test.go b/vfs/file_test.go index c3ed30896..8dd2b371e 100644 --- a/vfs/file_test.go +++ b/vfs/file_test.go @@ -1,6 +1,7 @@ package vfs import ( + "context" "io/ioutil" 
"os" "testing" @@ -13,7 +14,7 @@ import ( func fileCreate(t *testing.T, r *fstest.Run) (*VFS, *File, fstest.Item) { vfs := New(r.Fremote, nil) - file1 := r.WriteObject("dir/file1", "file1 contents", t1) + file1 := r.WriteObject(context.Background(), "dir/file1", "file1 contents", t1) fstest.CheckItems(t, r.Fremote, file1) node, err := vfs.Stat("dir/file1") diff --git a/vfs/rc.go b/vfs/rc.go index 7a010abb8..1f67e7318 100644 --- a/vfs/rc.go +++ b/vfs/rc.go @@ -1,6 +1,7 @@ package vfs import ( + "context" "strconv" "strings" "time" @@ -14,7 +15,7 @@ import ( func (vfs *VFS) addRC() { rc.Add(rc.Call{ Path: "vfs/forget", - Fn: func(in rc.Params) (out rc.Params, err error) { + Fn: func(ctx context.Context, in rc.Params) (out rc.Params, err error) { root, err := vfs.Root() if err != nil { return nil, err @@ -65,7 +66,7 @@ starting with dir will forget that dir, eg }) rc.Add(rc.Call{ Path: "vfs/refresh", - Fn: func(in rc.Params) (out rc.Params, err error) { + Fn: func(ctx context.Context, in rc.Params) (out rc.Params, err error) { root, err := vfs.Root() if err != nil { return nil, err @@ -253,7 +254,7 @@ func rcPollFunc(vfs *VFS) (rcPollFunc rc.Func) { }, }, nil } - return func(in rc.Params) (out rc.Params, err error) { + return func(ctx context.Context, in rc.Params) (out rc.Params, err error) { interval, intervalPresent, err := getInterval(in) if err != nil { return nil, err diff --git a/vfs/read.go b/vfs/read.go index 3e7b1b920..49c2a10f3 100644 --- a/vfs/read.go +++ b/vfs/read.go @@ -1,6 +1,7 @@ package vfs import ( + "context" "io" "os" "sync" @@ -65,7 +66,7 @@ func (fh *ReadFileHandle) openPending() (err error) { return nil } o := fh.file.getObject() - r, err := chunkedreader.New(o, int64(fh.file.d.vfs.Opt.ChunkSize), int64(fh.file.d.vfs.Opt.ChunkSizeLimit)).Open() + r, err := chunkedreader.New(context.TODO(), o, int64(fh.file.d.vfs.Opt.ChunkSize), int64(fh.file.d.vfs.Opt.ChunkSizeLimit)).Open() if err != nil { return err } @@ -122,7 +123,7 @@ func (fh 
*ReadFileHandle) seek(offset int64, reopen bool) (err error) { } if !reopen { fs.Debugf(fh.remote, "ReadFileHandle.seek from %d to %d (fs.RangeSeeker)", fh.offset, offset) - _, err = r.RangeSeek(offset, io.SeekStart, -1) + _, err = r.RangeSeek(context.TODO(), offset, io.SeekStart, -1) if err != nil { fs.Debugf(fh.remote, "ReadFileHandle.Read fs.RangeSeeker failed: %v", err) return err @@ -136,7 +137,7 @@ func (fh *ReadFileHandle) seek(offset int64, reopen bool) (err error) { } // re-open with a seek o := fh.file.getObject() - r = chunkedreader.New(o, int64(fh.file.d.vfs.Opt.ChunkSize), int64(fh.file.d.vfs.Opt.ChunkSizeLimit)) + r = chunkedreader.New(context.TODO(), o, int64(fh.file.d.vfs.Opt.ChunkSize), int64(fh.file.d.vfs.Opt.ChunkSizeLimit)) _, err := r.Seek(offset, 0) if err != nil { fs.Debugf(fh.remote, "ReadFileHandle.Read seek failed: %v", err) @@ -289,7 +290,7 @@ func (fh *ReadFileHandle) checkHash() error { o := fh.file.getObject() for hashType, dstSum := range fh.hash.Sums() { - srcSum, err := o.Hash(hashType) + srcSum, err := o.Hash(context.TODO(), hashType) if err != nil { return err } diff --git a/vfs/read_test.go b/vfs/read_test.go index 717f3e6e9..503e5a3e2 100644 --- a/vfs/read_test.go +++ b/vfs/read_test.go @@ -1,6 +1,7 @@ package vfs import ( + "context" "io" "os" "testing" @@ -14,7 +15,7 @@ import ( func readHandleCreate(t *testing.T, r *fstest.Run) (*VFS, *ReadFileHandle) { vfs := New(r.Fremote, nil) - file1 := r.WriteObject("dir/file1", "0123456789abcdef", t1) + file1 := r.WriteObject(context.Background(), "dir/file1", "0123456789abcdef", t1) fstest.CheckItems(t, r.Fremote, file1) h, err := vfs.OpenFile("dir/file1", os.O_RDONLY, 0777) diff --git a/vfs/read_write.go b/vfs/read_write.go index 3cd436659..c3cc5a3eb 100644 --- a/vfs/read_write.go +++ b/vfs/read_write.go @@ -1,6 +1,7 @@ package vfs import ( + "context" "fmt" "io" "io/ioutil" @@ -85,9 +86,9 @@ func newRWFileHandle(d *Dir, f *File, remote string, flags int) (fh *RWFileHandl // copy an 
object to or from the remote while accounting for it func copyObj(f fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Object, err error) { - if operations.NeedTransfer(dst, src) { + if operations.NeedTransfer(context.TODO(), dst, src) { accounting.Stats.Transferring(src.Remote()) - newDst, err = operations.Copy(f, dst, remote, src) + newDst, err = operations.Copy(context.TODO(), f, dst, remote, src) accounting.Stats.DoneTransferring(src.Remote(), err == nil) } else { newDst = dst @@ -115,7 +116,7 @@ func (fh *RWFileHandle) openPending(truncate bool) (err error) { // If the remote object exists AND its cached file exists locally AND there are no // other RW handles with it open, then attempt to update it. if o != nil && fh.file.rwOpens() == 0 { - cacheObj, err := fh.d.vfs.cache.f.NewObject(fh.remote) + cacheObj, err := fh.d.vfs.cache.f.NewObject(context.TODO(), fh.remote) if err == nil && cacheObj != nil { _, err = copyObj(fh.d.vfs.cache.f, cacheObj, fh.remote, o) if err != nil { @@ -296,7 +297,7 @@ func (fh *RWFileHandle) close() (err error) { if isCopied { // Transfer the temp file to the remote - cacheObj, err := fh.d.vfs.cache.f.NewObject(fh.remote) + cacheObj, err := fh.d.vfs.cache.f.NewObject(context.TODO(), fh.remote) if err != nil { err = errors.Wrap(err, "failed to find cache file") fs.Errorf(fh.logPrefix(), "%v", err) diff --git a/vfs/read_write_test.go b/vfs/read_write_test.go index b47c5b444..3f5c0a055 100644 --- a/vfs/read_write_test.go +++ b/vfs/read_write_test.go @@ -1,6 +1,7 @@ package vfs import ( + "context" "io" "io/ioutil" "os" @@ -25,7 +26,7 @@ func rwHandleCreateReadOnly(t *testing.T, r *fstest.Run) (*VFS, *RWFileHandle) { opt.CacheMode = CacheModeFull vfs := New(r.Fremote, &opt) - file1 := r.WriteObject("dir/file1", "0123456789abcdef", t1) + file1 := r.WriteObject(context.Background(), "dir/file1", "0123456789abcdef", t1) fstest.CheckItems(t, r.Fremote, file1) h, err := vfs.OpenFile("dir/file1", os.O_RDONLY, 0777) diff --git 
a/vfs/vfs.go b/vfs/vfs.go index 31f9d56c9..36f705d56 100644 --- a/vfs/vfs.go +++ b/vfs/vfs.go @@ -229,7 +229,7 @@ func New(f fs.Fs, opt *Options) *VFS { // Start polling function if do := vfs.f.Features().ChangeNotify; do != nil { vfs.pollChan = make(chan time.Duration) - do(vfs.root.ForgetPath, vfs.pollChan) + do(context.TODO(), vfs.root.ForgetPath, vfs.pollChan) vfs.pollChan <- vfs.Opt.PollInterval } else { fs.Infof(f, "poll-interval is not supported by this remote") @@ -480,7 +480,7 @@ func (vfs *VFS) Statfs() (total, used, free int64) { } if vfs.usageTime.IsZero() || time.Since(vfs.usageTime) >= vfs.Opt.DirCacheTime { var err error - vfs.usage, err = doAbout() + vfs.usage, err = doAbout(context.TODO()) vfs.usageTime = time.Now() if err != nil { fs.Errorf(vfs.f, "Statfs failed: %v", err) diff --git a/vfs/vfs_test.go b/vfs/vfs_test.go index 8ccd38e82..120ebebe7 100644 --- a/vfs/vfs_test.go +++ b/vfs/vfs_test.go @@ -3,6 +3,7 @@ package vfs import ( + "context" "io" "os" "testing" @@ -130,8 +131,8 @@ func TestVFSStat(t *testing.T) { defer r.Finalise() vfs := New(r.Fremote, nil) - file1 := r.WriteObject("file1", "file1 contents", t1) - file2 := r.WriteObject("dir/file2", "file2 contents", t2) + file1 := r.WriteObject(context.Background(), "file1", "file1 contents", t1) + file2 := r.WriteObject(context.Background(), "dir/file2", "file2 contents", t2) fstest.CheckItems(t, r.Fremote, file1, file2) node, err := vfs.Stat("file1") @@ -167,8 +168,8 @@ func TestVFSStatParent(t *testing.T) { defer r.Finalise() vfs := New(r.Fremote, nil) - file1 := r.WriteObject("file1", "file1 contents", t1) - file2 := r.WriteObject("dir/file2", "file2 contents", t2) + file1 := r.WriteObject(context.Background(), "file1", "file1 contents", t1) + file2 := r.WriteObject(context.Background(), "dir/file2", "file2 contents", t2) fstest.CheckItems(t, r.Fremote, file1, file2) node, leaf, err := vfs.StatParent("file1") @@ -201,8 +202,8 @@ func TestVFSOpenFile(t *testing.T) { defer r.Finalise() vfs 
:= New(r.Fremote, nil) - file1 := r.WriteObject("file1", "file1 contents", t1) - file2 := r.WriteObject("dir/file2", "file2 contents", t2) + file1 := r.WriteObject(context.Background(), "file1", "file1 contents", t1) + file2 := r.WriteObject(context.Background(), "dir/file2", "file2 contents", t2) fstest.CheckItems(t, r.Fremote, file1, file2) fd, err := vfs.OpenFile("file1", os.O_RDONLY, 0777) @@ -238,7 +239,7 @@ func TestVFSRename(t *testing.T) { } vfs := New(r.Fremote, nil) - file1 := r.WriteObject("dir/file2", "file2 contents", t2) + file1 := r.WriteObject(context.Background(), "dir/file2", "file2 contents", t2) fstest.CheckItems(t, r.Fremote, file1) err := vfs.Rename("dir/file2", "dir/file1") diff --git a/vfs/write.go b/vfs/write.go index 4cf5df9b2..cdfda39b5 100644 --- a/vfs/write.go +++ b/vfs/write.go @@ -1,6 +1,7 @@ package vfs import ( + "context" "io" "os" "sync" @@ -65,7 +66,7 @@ func (fh *WriteFileHandle) openPending() (err error) { pipeReader, fh.pipeWriter = io.Pipe() go func() { // NB Rcat deals with Stats.Transferring etc - o, err := operations.Rcat(fh.file.d.f, fh.remote, pipeReader, time.Now()) + o, err := operations.Rcat(context.TODO(), fh.file.d.f, fh.remote, pipeReader, time.Now()) if err != nil { fs.Errorf(fh.remote, "WriteFileHandle.New Rcat failed: %v", err) } diff --git a/vfs/write_test.go b/vfs/write_test.go index ec721b9a3..c91150cdd 100644 --- a/vfs/write_test.go +++ b/vfs/write_test.go @@ -1,6 +1,7 @@ package vfs import ( + "context" "os" "sync" "testing" @@ -230,11 +231,11 @@ var ( func canSetModTime(t *testing.T, r *fstest.Run) bool { canSetModTimeOnce.Do(func() { mtime1 := time.Date(2008, time.November, 18, 17, 32, 31, 0, time.UTC) - _ = r.WriteObject("time_test", "stuff", mtime1) - obj, err := r.Fremote.NewObject("time_test") + _ = r.WriteObject(context.Background(), "time_test", "stuff", mtime1) + obj, err := r.Fremote.NewObject(context.Background(), "time_test") require.NoError(t, err) mtime2 := time.Date(2009, time.November, 18, 
17, 32, 31, 0, time.UTC) - err = obj.SetModTime(mtime2) + err = obj.SetModTime(context.Background(), mtime2) switch err { case nil: canSetModTimeValue = true @@ -243,7 +244,7 @@ func canSetModTime(t *testing.T, r *fstest.Run) bool { default: require.NoError(t, err) } - require.NoError(t, obj.Remove()) + require.NoError(t, obj.Remove(context.Background())) fs.Debugf(nil, "Can set mod time: %v", canSetModTimeValue) }) return canSetModTimeValue