diff --git a/amazonclouddrive/amazonclouddrive.go b/amazonclouddrive/amazonclouddrive.go index 26e4ac4aa..48b6eec4d 100644 --- a/amazonclouddrive/amazonclouddrive.go +++ b/amazonclouddrive/amazonclouddrive.go @@ -90,6 +90,7 @@ func init() { // Fs represents a remote acd server type Fs struct { name string // name of this remote + features *fs.Features // optional features c *acd.Client // the connection to the acd server noAuthClient *http.Client // unauthenticated http client root string // the path we are working on @@ -126,6 +127,11 @@ func (f *Fs) String() string { return fmt.Sprintf("amazon drive root '%s'", f.root) } +// Features returns the optional features of this Fs +func (f *Fs) Features() *fs.Features { + return f.features +} + // Pattern to match a acd path var matcher = regexp.MustCompile(`^([^/]*)(.*)$`) @@ -184,6 +190,7 @@ func NewFs(name, root string) (fs.Fs, error) { noAuthClient: fs.Config.Client(), ts: ts, } + f.features = (&fs.Features{CaseInsensitive: true, ReadMimeType: true}).Fill(f) // Update endpoints var resp *http.Response diff --git a/b2/b2.go b/b2/b2.go index 5440a5f89..8a663a590 100644 --- a/b2/b2.go +++ b/b2/b2.go @@ -79,6 +79,8 @@ func init() { // Fs represents a remote b2 server type Fs struct { name string // name of this remote + root string // the path we are working on if any + features *fs.Features // optional features account string // account name key string // auth key endpoint string // name of the starting api endpoint @@ -86,7 +88,6 @@ type Fs struct { bucket string // the bucket we are working on bucketIDMutex sync.Mutex // mutex to protect _bucketID _bucketID string // the ID of the bucket we are working on - root string // the path we are working on if any info api.AuthorizeAccountResponse // result of authorize call uploadMu sync.Mutex // lock for upload variable uploads []*api.GetUploadURLResponse // result of get upload URL calls @@ -130,6 +131,11 @@ func (f *Fs) String() string { return fmt.Sprintf("B2 bucket %s 
path %s", f.bucket, f.root) } +// Features returns the optional features of this Fs +func (f *Fs) Features() *fs.Features { + return f.features +} + // Pattern to match a b2 path var matcher = regexp.MustCompile(`^([^/]*)(.*)$`) @@ -250,6 +256,7 @@ func NewFs(name, root string) (fs.Fs, error) { uploadTokens: make(chan struct{}, fs.Config.Transfers), extraTokens: make(chan struct{}, fs.Config.Transfers), } + f.features = (&fs.Features{ReadMimeType: true, WriteMimeType: true}).Fill(f) // Set the test flag if required if *b2TestMode != "" { testMode := strings.TrimSpace(*b2TestMode) diff --git a/crypt/crypt.go b/crypt/crypt.go index 2e123c03b..92fd1f96b 100644 --- a/crypt/crypt.go +++ b/crypt/crypt.go @@ -88,21 +88,30 @@ func NewFs(name, rpath string) (fs.Fs, error) { } f := &Fs{ Fs: wrappedFs, - cipher: cipher, - mode: mode, name: name, root: rpath, + cipher: cipher, + mode: mode, } + // the features here are ones we could support, and they are + // ANDed with the ones from wrappedFs + f.features = (&fs.Features{ + CaseInsensitive: mode == NameEncryptionOff, + DuplicateFiles: true, + ReadMimeType: false, // MimeTypes not supported with crypt + WriteMimeType: false, + }).Fill(f).Mask(wrappedFs) return f, err } // Fs represents a wrapped fs.Fs type Fs struct { fs.Fs - cipher Cipher - mode NameEncryptionMode - name string - root string + name string + root string + features *fs.Features // optional features + cipher Cipher + mode NameEncryptionMode } // Name of the remote (as passed into NewFs) @@ -115,6 +124,11 @@ func (f *Fs) Root() string { return f.root } +// Features returns the optional features of this Fs +func (f *Fs) Features() *fs.Features { + return f.features +} + // String returns a description of the FS func (f *Fs) String() string { return fmt.Sprintf("Encrypted %s", f.Fs.String()) @@ -177,11 +191,11 @@ func (f *Fs) Rmdir(dir string) error { // // Return an error if it doesn't exist func (f *Fs) Purge() error { - do, ok := f.Fs.(fs.Purger) - if !ok { + do 
:= f.Fs.Features().Purge + if do == nil { return fs.ErrorCantPurge } - return do.Purge() + return do() } // Copy src to this remote using server side copy operations. @@ -194,15 +208,15 @@ func (f *Fs) Purge() error { // // If it isn't possible then return fs.ErrorCantCopy func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { - do, ok := f.Fs.(fs.Copier) - if !ok { + do := f.Fs.Features().Copy + if do == nil { return nil, fs.ErrorCantCopy } o, ok := src.(*Object) if !ok { return nil, fs.ErrorCantCopy } - oResult, err := do.Copy(o.Object, f.cipher.EncryptFileName(remote)) + oResult, err := do(o.Object, f.cipher.EncryptFileName(remote)) if err != nil { return nil, err } @@ -219,15 +233,15 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { // // If it isn't possible then return fs.ErrorCantMove func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { - do, ok := f.Fs.(fs.Mover) - if !ok { + do := f.Fs.Features().Move + if do == nil { return nil, fs.ErrorCantMove } o, ok := src.(*Object) if !ok { return nil, fs.ErrorCantMove } - oResult, err := do.Move(o.Object, f.cipher.EncryptFileName(remote)) + oResult, err := do(o.Object, f.cipher.EncryptFileName(remote)) if err != nil { return nil, err } @@ -243,8 +257,8 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { // // If destination exists then return fs.ErrorDirExists func (f *Fs) DirMove(src fs.Fs) error { - do, ok := f.Fs.(fs.DirMover) - if !ok { + do := f.Fs.Features().DirMove + if do == nil { return fs.ErrorCantDirMove } srcFs, ok := src.(*Fs) @@ -252,7 +266,39 @@ func (f *Fs) DirMove(src fs.Fs) error { fs.Debug(srcFs, "Can't move directory - not same remote type") return fs.ErrorCantDirMove } - return do.DirMove(srcFs.Fs) + return do(srcFs.Fs) +} + +// PutUnchecked uploads the object +// +// This will create a duplicate if we upload a new file without +// checking to see if there is one already - use Put() for that. 
+func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo) (fs.Object, error) { + do := f.Fs.Features().PutUnchecked + if do == nil { + return nil, errors.New("can't PutUnchecked") + } + wrappedIn, err := f.cipher.EncryptData(in) + if err != nil { + return nil, err + } + o, err := do(wrappedIn, f.newObjectInfo(src)) + if err != nil { + return nil, err + } + return f.newObject(o), nil +} + +// CleanUp the trash in the Fs +// +// Implement this if you have a way of emptying the trash or +// otherwise cleaning up old versions of files. +func (f *Fs) CleanUp() error { + do := f.Fs.Features().CleanUp + if do == nil { + return errors.New("can't CleanUp") + } + return do() } // UnWrap returns the Fs that this Fs is wrapping @@ -473,14 +519,15 @@ func (lo *ListOpts) IncludeDirectory(remote string) bool { // Check the interfaces are satisfied var ( - _ fs.Fs = (*Fs)(nil) - _ fs.Purger = (*Fs)(nil) - _ fs.Copier = (*Fs)(nil) - _ fs.Mover = (*Fs)(nil) - _ fs.DirMover = (*Fs)(nil) - // _ fs.PutUncheckeder = (*Fs)(nil) - _ fs.UnWrapper = (*Fs)(nil) - _ fs.ObjectInfo = (*ObjectInfo)(nil) - _ fs.Object = (*Object)(nil) - _ fs.ListOpts = (*ListOpts)(nil) + _ fs.Fs = (*Fs)(nil) + _ fs.Purger = (*Fs)(nil) + _ fs.Copier = (*Fs)(nil) + _ fs.Mover = (*Fs)(nil) + _ fs.DirMover = (*Fs)(nil) + _ fs.PutUncheckeder = (*Fs)(nil) + _ fs.CleanUpper = (*Fs)(nil) + _ fs.UnWrapper = (*Fs)(nil) + _ fs.ObjectInfo = (*ObjectInfo)(nil) + _ fs.Object = (*Object)(nil) + _ fs.ListOpts = (*ListOpts)(nil) ) diff --git a/drive/drive.go b/drive/drive.go index 2abbea837..d2a6f100b 100644 --- a/drive/drive.go +++ b/drive/drive.go @@ -115,8 +115,9 @@ func init() { // Fs represents a remote drive server type Fs struct { name string // name of this remote - svc *drive.Service // the connection to the drive server root string // the path we are working on + features *fs.Features // optional features + svc *drive.Service // the connection to the drive server client *http.Client // authorized client about 
*drive.About // information about the drive, including the root dirCache *dircache.DirCache // Map of directory path to directory id @@ -154,6 +155,11 @@ func (f *Fs) String() string { return fmt.Sprintf("Google drive root '%s'", f.root) } +// Features returns the optional features of this Fs +func (f *Fs) Features() *fs.Features { + return f.features +} + // shouldRetry determines whehter a given err rates being retried func shouldRetry(err error) (again bool, errOut error) { again = false @@ -294,6 +300,7 @@ func NewFs(name, path string) (fs.Fs, error) { root: root, pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.GoogleDrivePacer), } + f.features = (&fs.Features{DuplicateFiles: true, ReadMimeType: true, WriteMimeType: true}).Fill(f) // Create a new authorized Drive client. f.client = oAuthClient diff --git a/dropbox/dropbox.go b/dropbox/dropbox.go index 33c9c5279..16df15ea9 100644 --- a/dropbox/dropbox.go +++ b/dropbox/dropbox.go @@ -96,8 +96,9 @@ func configHelper(name string) { // Fs represents a remote dropbox server type Fs struct { name string // name of this remote - db *dropbox.Dropbox // the connection to the dropbox server root string // the path we are working on + features *fs.Features // optional features + db *dropbox.Dropbox // the connection to the dropbox server slashRoot string // root with "/" prefix, lowercase slashRootSlash string // root with "/" prefix and postfix, lowercase } @@ -129,6 +130,11 @@ func (f *Fs) String() string { return fmt.Sprintf("Dropbox root '%s'", f.root) } +// Features returns the optional features of this Fs +func (f *Fs) Features() *fs.Features { + return f.features +} + // Makes a new dropbox from the config func newDropbox(name string) (*dropbox.Dropbox, error) { db := dropbox.NewDropbox() @@ -159,6 +165,7 @@ func NewFs(name, root string) (fs.Fs, error) { name: name, db: db, } + f.features = (&fs.Features{CaseInsensitive: true, ReadMimeType: true}).Fill(f) f.setRoot(root) // Read the token from the config 
file diff --git a/fs/fs.go b/fs/fs.go index 9139e4204..39d1e31f2 100644 --- a/fs/fs.go +++ b/fs/fs.go @@ -159,6 +159,9 @@ type Info interface { // Returns the supported hash types of the filesystem Hashes() HashSet + + // Features returns the optional features of this Fs + Features() *Features } // Object is a filesystem like object provided by an Fs @@ -217,6 +220,159 @@ type MimeTyper interface { MimeType() string } +// Features describe the optional features of the Fs +type Features struct { + // Feature flags + CaseInsensitive bool + DuplicateFiles bool + ReadMimeType bool + WriteMimeType bool + + // Purge all files in the root and the root directory + // + // Implement this if you have a way of deleting all the files + // quicker than just running Remove() on the result of List() + // + // Return an error if it doesn't exist + Purge func() error + + // Copy src to this remote using server side copy operations. + // + // This is stored with the remote path given + // + // It returns the destination Object and a possible error + // + // Will only be called if src.Fs().Name() == f.Name() + // + // If it isn't possible then return fs.ErrorCantCopy + Copy func(src Object, remote string) (Object, error) + + // Move src to this remote using server side move operations. + // + // This is stored with the remote path given + // + // It returns the destination Object and a possible error + // + // Will only be called if src.Fs().Name() == f.Name() + // + // If it isn't possible then return fs.ErrorCantMove + Move func(src Object, remote string) (Object, error) + + // DirMove moves src to this remote using server side move + // operations. 
+ // + // Will only be called if src.Fs().Name() == f.Name() + // + // If it isn't possible then return fs.ErrorCantDirMove + // + // If destination exists then return fs.ErrorDirExists + DirMove func(src Fs) error + + // UnWrap returns the Fs that this Fs is wrapping + UnWrap func() Fs + + // DirCacheFlush resets the directory cache - used in testing + // as an optional interface + DirCacheFlush func() + + // Put in to the remote path with the modTime given of the given size + // + // May create the object even if it returns an error - if so + // will return the object and the error, otherwise will return + // nil and the error + // + // May create duplicates or return errors if src already + // exists. + PutUnchecked func(in io.Reader, src ObjectInfo) (Object, error) + + // CleanUp the trash in the Fs + // + // Implement this if you have a way of emptying the trash or + // otherwise cleaning up old versions of files. + CleanUp func() error +} + +// Fill fills in the function pointers in the Features struct from the +// optional interfaces. It returns the original updated Features +// struct passed in. +func (ft *Features) Fill(f Fs) *Features { + if do, ok := f.(Purger); ok { + ft.Purge = do.Purge + } + if do, ok := f.(Copier); ok { + ft.Copy = do.Copy + } + if do, ok := f.(Mover); ok { + ft.Move = do.Move + } + if do, ok := f.(DirMover); ok { + ft.DirMove = do.DirMove + } + if do, ok := f.(UnWrapper); ok { + ft.UnWrap = do.UnWrap + } + if do, ok := f.(DirCacheFlusher); ok { + ft.DirCacheFlush = do.DirCacheFlush + } + if do, ok := f.(PutUncheckeder); ok { + ft.PutUnchecked = do.PutUnchecked + } + if do, ok := f.(CleanUpper); ok { + ft.CleanUp = do.CleanUp + } + return ft +} + +// Mask the Features with the Fs passed in +// +// Only optional features which are implemented in both the original +// Fs AND the one passed in will be advertised. Any features which +// aren't in both will be set to false/nil, except for UnWrap which +// will be left untouched. 
+func (ft *Features) Mask(f Fs) *Features { + mask := f.Features() + ft.CaseInsensitive = ft.CaseInsensitive && mask.CaseInsensitive + ft.DuplicateFiles = ft.DuplicateFiles && mask.DuplicateFiles + ft.ReadMimeType = ft.ReadMimeType && mask.ReadMimeType + ft.WriteMimeType = ft.WriteMimeType && mask.WriteMimeType + if mask.Purge == nil { + ft.Purge = nil + } + if mask.Copy == nil { + ft.Copy = nil + } + if mask.Move == nil { + ft.Move = nil + } + if mask.DirMove == nil { + ft.DirMove = nil + } + // if mask.UnWrap == nil { + // ft.UnWrap = nil + // } + if mask.DirCacheFlush == nil { + ft.DirCacheFlush = nil + } + if mask.PutUnchecked == nil { + ft.PutUnchecked = nil + } + if mask.CleanUp == nil { + ft.CleanUp = nil + } + return ft +} + +// Wrap makes a Copy of the features passed in, overriding the UnWrap +// method only if available in f. +func (ft *Features) Wrap(f Fs) *Features { + copy := new(Features) + *copy = *ft + if do, ok := f.(UnWrapper); ok { + copy.UnWrap = do.UnWrap + } + return copy +} + // Purger is an optional interfaces for Fs type Purger interface { // Purge all files in the root and the root directory diff --git a/fs/operations.go b/fs/operations.go index de66c33ac..16f1dbce3 100644 --- a/fs/operations.go +++ b/fs/operations.go @@ -252,9 +252,9 @@ func Copy(f Fs, dst Object, remote string, src Object) (err error) { // Try server side copy first - if has optional interface and // is same underlying remote actionTaken = "Copied (server side copy)" - if fCopy, ok := f.(Copier); ok && SameConfig(src.Fs(), f) { + if doCopy := f.Features().Copy; doCopy != nil && SameConfig(src.Fs(), f) { var newDst Object - newDst, err = fCopy.Copy(src, remote) + newDst, err = doCopy(src, remote) if err == nil { dst = newDst } @@ -353,7 +353,7 @@ func Move(fdst Fs, dst Object, remote string, src Object) (err error) { return nil } // See if we have Move available - if do, ok := fdst.(Mover); ok && SameConfig(src.Fs(), fdst) { + if doMove := fdst.Features().Move; doMove != 
nil && SameConfig(src.Fs(), fdst) { // Delete destination if it exists if dst != nil { err = DeleteFile(dst) @@ -362,7 +362,7 @@ func Move(fdst Fs, dst Object, remote string, src Object) (err error) { } } // Move dst <- src - _, err := do.Move(src, remote) + _, err := doMove(src, remote) switch err { case nil: Debug(src, "Moved (server side)") @@ -391,8 +391,8 @@ func Move(fdst Fs, dst Object, remote string, src Object) (err error) { // Some remotes simulate rename by server-side copy and delete, so include // remotes that implements either Mover or Copier. func CanServerSideMove(fdst Fs) bool { - _, canMove := fdst.(Mover) - _, canCopy := fdst.(Copier) + canMove := fdst.Features().Move != nil + canCopy := fdst.Features().Copy != nil return canMove || canCopy } @@ -880,12 +880,12 @@ func Rmdir(f Fs, dir string) error { func Purge(f Fs) error { doFallbackPurge := true var err error - if purger, ok := f.(Purger); ok { + if doPurge := f.Features().Purge; doPurge != nil { doFallbackPurge = false if Config.DryRun { Log(f, "Not purging as --dry-run set") } else { - err = purger.Purge() + err = doPurge() if err == ErrorCantPurge { doFallbackPurge = true } @@ -929,8 +929,8 @@ func Delete(f Fs) error { // dedupeRename renames the objs slice to different names func dedupeRename(remote string, objs []Object) { f := objs[0].Fs() - mover, ok := f.(Mover) - if !ok { + doMove := f.Features().Move + if doMove == nil { log.Fatalf("Fs %v doesn't support Move", f) } ext := path.Ext(remote) @@ -938,7 +938,7 @@ func dedupeRename(remote string, objs []Object) { for i, o := range objs { newName := fmt.Sprintf("%s-%d%s", base, i+1, ext) if !Config.DryRun { - newObj, err := mover.Move(o, newName) + newObj, err := doMove(o, newName) if err != nil { Stats.Error() ErrorLog(o, "Failed to rename: %v", err) @@ -1159,15 +1159,15 @@ func listToChan(list *Lister) ObjectsChan { // CleanUp removes the trash for the Fs func CleanUp(f Fs) error { - fc, ok := f.(CleanUpper) - if !ok { + doCleanUp := 
f.Features().CleanUp + if doCleanUp == nil { return errors.Errorf("%v doesn't support cleanup", f) } if Config.DryRun { Log(f, "Not running cleanup as --dry-run set") return nil } - return fc.CleanUp() + return doCleanUp() } // Cat any files to the io.Writer diff --git a/fs/operations_test.go b/fs/operations_test.go index da195f843..808d5f21f 100644 --- a/fs/operations_test.go +++ b/fs/operations_test.go @@ -148,6 +148,9 @@ func NewRun(t *testing.T) *Run { for { o, err := list.GetObject() if err != nil { + if err == fs.ErrorDirNotFound { + break + } t.Fatalf("Error listing: %v", err) } // Check if we are finished @@ -223,9 +226,8 @@ func (r *Run) Mkdir(f fs.Fs) { func (r *Run) WriteObjectTo(f fs.Fs, remote, content string, modTime time.Time, useUnchecked bool) fstest.Item { put := f.Put if useUnchecked { - if fPutUnchecked, ok := f.(fs.PutUncheckeder); ok { - put = fPutUnchecked.PutUnchecked - } else { + put = f.Features().PutUnchecked + if put == nil { r.Fatalf("Fs doesn't support PutUnchecked") } } @@ -514,12 +516,22 @@ func (r *Run) checkWithDuplicates(t *testing.T, items ...fstest.Item) { assert.Equal(t, wantSize, size) } -func TestDeduplicateInteractive(t *testing.T) { - if *RemoteName != "TestDrive:" { - t.Skip("Can only test deduplicate on google drive") +func skipIfCantDedupe(t *testing.T, f fs.Fs) { + if f.Features().PutUnchecked == nil { + t.Skip("Can't test deduplicate - no PutUnchecked") } + if !f.Features().DuplicateFiles { + t.Skip("Can't test deduplicate - no duplicate files possible") + } + if !f.Hashes().Contains(fs.HashMD5) { + t.Skip("Can't test deduplicate - MD5 not supported") + } +} + +func TestDeduplicateInteractive(t *testing.T) { r := NewRun(t) defer r.Finalise() + skipIfCantDedupe(t, r.fremote) file1 := r.WriteUncheckedObject("one", "This is one", t1) file2 := r.WriteUncheckedObject("one", "This is one", t1) @@ -533,11 +545,9 @@ func TestDeduplicateInteractive(t *testing.T) { } func TestDeduplicateSkip(t *testing.T) { - if *RemoteName != 
"TestDrive:" { - t.Skip("Can only test deduplicate on google drive") - } r := NewRun(t) defer r.Finalise() + skipIfCantDedupe(t, r.fremote) file1 := r.WriteUncheckedObject("one", "This is one", t1) file2 := r.WriteUncheckedObject("one", "This is one", t1) @@ -551,11 +561,9 @@ func TestDeduplicateSkip(t *testing.T) { } func TestDeduplicateFirst(t *testing.T) { - if *RemoteName != "TestDrive:" { - t.Skip("Can only test deduplicate on google drive") - } r := NewRun(t) defer r.Finalise() + skipIfCantDedupe(t, r.fremote) file1 := r.WriteUncheckedObject("one", "This is one", t1) file2 := r.WriteUncheckedObject("one", "This is one A", t1) @@ -574,11 +582,9 @@ func TestDeduplicateFirst(t *testing.T) { } func TestDeduplicateNewest(t *testing.T) { - if *RemoteName != "TestDrive:" { - t.Skip("Can only test deduplicate on google drive") - } r := NewRun(t) defer r.Finalise() + skipIfCantDedupe(t, r.fremote) file1 := r.WriteUncheckedObject("one", "This is one", t1) file2 := r.WriteUncheckedObject("one", "This is one too", t2) @@ -592,11 +598,9 @@ func TestDeduplicateNewest(t *testing.T) { } func TestDeduplicateOldest(t *testing.T) { - if *RemoteName != "TestDrive:" { - t.Skip("Can only test deduplicate on google drive") - } r := NewRun(t) defer r.Finalise() + skipIfCantDedupe(t, r.fremote) file1 := r.WriteUncheckedObject("one", "This is one", t1) file2 := r.WriteUncheckedObject("one", "This is one too", t2) @@ -610,11 +614,9 @@ func TestDeduplicateOldest(t *testing.T) { } func TestDeduplicateRename(t *testing.T) { - if *RemoteName != "TestDrive:" { - t.Skip("Can only test deduplicate on google drive") - } r := NewRun(t) defer r.Finalise() + skipIfCantDedupe(t, r.fremote) file1 := r.WriteUncheckedObject("one.txt", "This is one", t1) file2 := r.WriteUncheckedObject("one.txt", "This is one too", t2) @@ -778,6 +780,7 @@ type testFsInfo struct { stringVal string precision time.Duration hashes fs.HashSet + features fs.Features } // Name of the remote (as passed into NewFs) @@ -795,6 
+798,9 @@ func (i *testFsInfo) Precision() time.Duration { return i.precision } // Returns the supported hash types of the filesystem func (i *testFsInfo) Hashes() fs.HashSet { return i.hashes } +// Features returns the optional features of the filesystem +func (i *testFsInfo) Features() *fs.Features { return &i.features } + func TestSameConfig(t *testing.T) { a := &testFsInfo{name: "name", root: "root"} for _, test := range []struct { diff --git a/fs/sync.go b/fs/sync.go index fb80879a2..d36e7e290 100644 --- a/fs/sync.go +++ b/fs/sync.go @@ -720,13 +720,13 @@ func MoveDir(fdst, fsrc Fs) error { } // First attempt to use DirMover if exists, same Fs and no filters are active - if fdstDirMover, ok := fdst.(DirMover); ok && SameConfig(fsrc, fdst) && Config.Filter.InActive() { + if fdstDirMove := fdst.Features().DirMove; fdstDirMove != nil && SameConfig(fsrc, fdst) && Config.Filter.InActive() { if Config.DryRun { Log(fdst, "Not doing server side directory move as --dry-run") return nil } Debug(fdst, "Using server side directory move") - err := fdstDirMover.DirMove(fsrc) + err := fdstDirMove(fsrc) switch err { case ErrorCantDirMove, ErrorDirExists: Debug(fdst, "Server side directory move failed - fallback to file moves: %v", err) diff --git a/fs/sync_test.go b/fs/sync_test.go index b9b6800b8..87a438dae 100644 --- a/fs/sync_test.go +++ b/fs/sync_test.go @@ -719,7 +719,7 @@ func TestServerSideMoveOverlap(t *testing.T) { r := NewRun(t) defer r.Finalise() - if _, ok := r.fremote.(fs.DirMover); ok { + if r.fremote.Features().DirMove != nil { t.Skip("Skipping test as remote supports DirMove") } diff --git a/fstest/fstest.go b/fstest/fstest.go index 39fe18c57..ff81a1dfb 100644 --- a/fstest/fstest.go +++ b/fstest/fstest.go @@ -175,9 +175,9 @@ func CheckListingWithPrecision(t *testing.T, f fs.Fs, items []Item, expectedDirs sleep *= 2 t.Logf("Sleeping for %v for list eventual consistency: %d/%d", sleep, i, retries) time.Sleep(sleep) - if do, ok := f.(fs.DirCacheFlusher); ok { + if
doDirCacheFlush := f.Features().DirCacheFlush; doDirCacheFlush != nil { t.Logf("Flushing the directory cache") - do.DirCacheFlush() + doDirCacheFlush() } } for _, obj := range objs { diff --git a/fstest/fstests/fstests.go b/fstest/fstests/fstests.go index 5b0f53312..a31c234eb 100644 --- a/fstest/fstests/fstests.go +++ b/fstest/fstests/fstests.go @@ -384,8 +384,8 @@ func TestFsCopy(t *testing.T) { skipIfNotOk(t) // Check have Copy - _, ok := remote.(fs.Copier) - if !ok { + doCopy := remote.Features().Copy + if doCopy == nil { t.Skip("FS has no Copier interface") } @@ -394,7 +394,7 @@ func TestFsCopy(t *testing.T) { // do the copy src := findObject(t, file1.Path) - dst, err := remote.(fs.Copier).Copy(src, file1Copy.Path) + dst, err := doCopy(src, file1Copy.Path) if err == fs.ErrorCantCopy { t.Skip("FS can't copy") } @@ -417,8 +417,8 @@ func TestFsMove(t *testing.T) { skipIfNotOk(t) // Check have Move - _, ok := remote.(fs.Mover) - if !ok { + doMove := remote.Features().Move + if doMove == nil { t.Skip("FS has no Mover interface") } @@ -433,7 +433,7 @@ func TestFsMove(t *testing.T) { // separate operations file2Move.Path = "other.txt" src := findObject(t, file2.Path) - dst, err := remote.(fs.Mover).Move(src, file2Move.Path) + dst, err := doMove(src, file2Move.Path) if err == fs.ErrorCantMove { t.Skip("FS can't move") } @@ -448,7 +448,7 @@ func TestFsMove(t *testing.T) { // Check conflict on "rename, then move" file1Move.Path = "moveTest/other.txt" src = findObject(t, file1.Path) - _, err = remote.(fs.Mover).Move(src, file1Move.Path) + _, err = doMove(src, file1Move.Path) require.NoError(t, err) fstest.CheckListing(t, remote, []fstest.Item{file1Move, file2Move}) // 1: moveTest/other.txt @@ -456,14 +456,14 @@ func TestFsMove(t *testing.T) { // Check conflict on "move, then rename" src = findObject(t, file1Move.Path) - _, err = remote.(fs.Mover).Move(src, file1.Path) + _, err = doMove(src, file1.Path) require.NoError(t, err) fstest.CheckListing(t, remote, 
[]fstest.Item{file1, file2Move}) // 1: file name.txt // 2: other.txt src = findObject(t, file2Move.Path) - _, err = remote.(fs.Mover).Move(src, file2.Path) + _, err = doMove(src, file2.Path) require.NoError(t, err) fstest.CheckListing(t, remote, []fstest.Item{file1, file2}) // 1: file name.txt @@ -483,13 +483,13 @@ func TestFsDirMove(t *testing.T) { skipIfNotOk(t) // Check have DirMove - _, ok := remote.(fs.DirMover) - if !ok { + doDirMove := remote.Features().DirMove + if doDirMove == nil { t.Skip("FS has no DirMover interface") } // Check it can't move onto itself - err := remote.(fs.DirMover).DirMove(remote) + err := doDirMove(remote) require.Equal(t, fs.ErrorDirExists, err) // new remote @@ -498,7 +498,7 @@ func TestFsDirMove(t *testing.T) { defer removeNewRemote() // try the move - err = newRemote.(fs.DirMover).DirMove(remote) + err = newRemote.Features().DirMove(remote) require.NoError(t, err) // check remotes @@ -507,7 +507,7 @@ func TestFsDirMove(t *testing.T) { fstest.CheckListing(t, newRemote, []fstest.Item{file2, file1}) // move it back - err = remote.(fs.DirMover).DirMove(newRemote) + err = doDirMove(newRemote) require.NoError(t, err) // check remotes @@ -550,8 +550,8 @@ func TestObjectFs(t *testing.T) { testRemote := remote if obj.Fs() != testRemote { // Check to see if this wraps something else - if unwrap, ok := testRemote.(fs.UnWrapper); ok { - testRemote = unwrap.UnWrap() + if doUnWrap := testRemote.Features().UnWrap; doUnWrap != nil { + testRemote = doUnWrap() } } assert.Equal(t, obj.Fs(), testRemote) diff --git a/googlecloudstorage/googlecloudstorage.go b/googlecloudstorage/googlecloudstorage.go index af8168143..b1f41b696 100644 --- a/googlecloudstorage/googlecloudstorage.go +++ b/googlecloudstorage/googlecloudstorage.go @@ -130,10 +130,11 @@ func init() { // Fs represents a remote storage server type Fs struct { name string // name of this remote + root string // the path we are working on if any + features *fs.Features // optional features svc 
*storage.Service // the connection to the storage server client *http.Client // authorized client bucket string // the bucket we are working on - root string // the path we are working on if any projectNumber string // used for finding buckets objectACL string // used when creating new objects bucketACL string // used when creating new buckets @@ -175,6 +176,11 @@ func (f *Fs) String() string { return fmt.Sprintf("Storage bucket %s path %s", f.bucket, f.root) } +// Features returns the optional features of this Fs +func (f *Fs) Features() *fs.Features { + return f.features +} + // Pattern to match a storage path var matcher = regexp.MustCompile(`^([^/]*)(.*)$`) @@ -234,6 +240,7 @@ func NewFs(name, root string) (fs.Fs, error) { objectACL: fs.ConfigFileGet(name, "object_acl"), bucketACL: fs.ConfigFileGet(name, "bucket_acl"), } + f.features = (&fs.Features{ReadMimeType: true, WriteMimeType: true}).Fill(f) if f.objectACL == "" { f.objectACL = "private" } diff --git a/hubic/hubic.go b/hubic/hubic.go index 47d127f5f..b9804d777 100644 --- a/hubic/hubic.go +++ b/hubic/hubic.go @@ -76,6 +76,7 @@ type credentials struct { // Fs represents a remote hubic type Fs struct { fs.Fs // wrapped Fs + features *fs.Features // optional features client *http.Client // client for oauth api credentials credentials // returned from the Hubic API expires time.Time // time credentials expire @@ -169,37 +170,13 @@ func NewFs(name, root string) (fs.Fs, error) { return nil, err } f.Fs = swiftFs + f.features = f.Fs.Features().Wrap(f) return f, err } -// Purge deletes all the files and the container -// -// Optional interface: Only implement this if you have a way of -// deleting all the files quicker than just running Remove() on the -// result of List() -func (f *Fs) Purge() error { - fPurge, ok := f.Fs.(fs.Purger) - if !ok { - return fs.ErrorCantPurge - } - return fPurge.Purge() -} - -// Copy src to this remote using server side copy operations. 
-// -// This is stored with the remote path given -// -// It returns the destination Object and a possible error -// -// Will only be called if src.Fs().Name() == f.Name() -// -// If it isn't possible then return fs.ErrorCantCopy -func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { - fCopy, ok := f.Fs.(fs.Copier) - if !ok { - return nil, fs.ErrorCantCopy - } - return fCopy.Copy(src, remote) +// Features returns the optional features of this Fs +func (f *Fs) Features() *fs.Features { + return f.features } // UnWrap returns the Fs that this Fs is wrapping @@ -207,16 +184,8 @@ func (f *Fs) UnWrap() fs.Fs { return f.Fs } -// Hashes returns the supported hash sets. -// Inherited from swift -func (f *Fs) Hashes() fs.HashSet { - return fs.HashSet(fs.HashMD5) -} - // Check the interfaces are satisfied var ( _ fs.Fs = (*Fs)(nil) - _ fs.Purger = (*Fs)(nil) - _ fs.Copier = (*Fs)(nil) _ fs.UnWrapper = (*Fs)(nil) ) diff --git a/local/local.go b/local/local.go index c35372982..ba5c9874c 100644 --- a/local/local.go +++ b/local/local.go @@ -47,6 +47,7 @@ func init() { type Fs struct { name string // the name of the remote root string // The root directory (OS path) + features *fs.Features // optional features dev uint64 // device number of root node precisionOk sync.Once // Whether we need to read the precision precision time.Duration // precision of local filesystem @@ -78,6 +79,7 @@ func NewFs(name, root string) (fs.Fs, error) { dev: devUnset, } f.root = f.cleanPath(root) + f.features = (&fs.Features{CaseInsensitive: f.caseInsensitive()}).Fill(f) // Check to see if this points to a file fi, err := os.Lstat(f.root) @@ -108,6 +110,21 @@ func (f *Fs) String() string { return fmt.Sprintf("Local file system at %s", f.root) } +// Features returns the optional features of this Fs +func (f *Fs) Features() *fs.Features { + return f.features +} + +// caseInsensitive returns whether the remote is case insensitive or not +func (f *Fs) caseInsensitive() bool { + // FIXME not
entirely accurate since you can have case + // sensitive Fses on darwin and case insensitive Fses on linux. + // Should probably check but that would involve creating a + // file in the remote to be most accurate which probably isn't + // desirable. + return runtime.GOOS == "windows" || runtime.GOOS == "darwin" +} + // newObject makes a half completed Object func (f *Fs) newObject(remote string) *Object { dstPath := f.cleanPath(filepath.Join(f.root, remote)) diff --git a/onedrive/onedrive.go b/onedrive/onedrive.go index c702d6e8a..459b68391 100644 --- a/onedrive/onedrive.go +++ b/onedrive/onedrive.go @@ -81,8 +81,9 @@ func init() { // Fs represents a remote one drive type Fs struct { name string // name of this remote - srv *rest.Client // the connection to the one drive server root string // the path we are working on + features *fs.Features // optional features + srv *rest.Client // the connection to the one drive server dirCache *dircache.DirCache // Map of directory path to directory id pacer *pacer.Pacer // pacer for API calls } @@ -118,6 +119,11 @@ func (f *Fs) String() string { return fmt.Sprintf("One drive root '%s'", f.root) } +// Features returns the optional features of this Fs +func (f *Fs) Features() *fs.Features { + return f.features +} + // Pattern to match a one drive path var matcher = regexp.MustCompile(`^([^/]*)(.*)$`) @@ -184,6 +190,7 @@ func NewFs(name, root string) (fs.Fs, error) { srv: rest.NewClient(oAuthClient).SetRoot(rootURL), pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant), } + f.features = (&fs.Features{CaseInsensitive: true, ReadMimeType: true}).Fill(f) f.srv.SetErrorHandler(errorHandler) // Get rootID diff --git a/s3/s3.go b/s3/s3.go index 457169628..b7e79ac27 100644 --- a/s3/s3.go +++ b/s3/s3.go @@ -216,11 +216,12 @@ var ( // Fs represents a remote s3 server type Fs struct { name string // the name of the remote + root string // root of the bucket - ignore all objects above this +
features *fs.Features // optional features c *s3.S3 // the connection to the s3 server ses *session.Session // the s3 session bucket string // the bucket we are working on acl string // ACL for new buckets / objects - root string // root of the bucket - ignore all objects above this locationConstraint string // location constraint of new buckets sse string // the type of server-side encryption storageClass string // storage class @@ -264,6 +265,11 @@ func (f *Fs) String() string { return fmt.Sprintf("S3 bucket %s path %s", f.bucket, f.root) } +// Features returns the optional features of this Fs +func (f *Fs) Features() *fs.Features { + return f.features +} + // Pattern to match a s3 path var matcher = regexp.MustCompile(`^([^/]*)(.*)$`) @@ -373,6 +379,7 @@ func NewFs(name, root string) (fs.Fs, error) { sse: fs.ConfigFileGet(name, "server_side_encryption"), storageClass: fs.ConfigFileGet(name, "storage_class"), } + f.features = (&fs.Features{ReadMimeType: true, WriteMimeType: true}).Fill(f) if *s3ACL != "" { f.acl = *s3ACL } diff --git a/swift/swift.go b/swift/swift.go index d508d33eb..d1b8c3b99 100644 --- a/swift/swift.go +++ b/swift/swift.go @@ -88,10 +88,11 @@ func init() { // Fs represents a remote swift server type Fs struct { name string // name of this remote + root string // the path we are working on if any + features *fs.Features // optional features c *swift.Connection // the connection to the swift server container string // the container we are working on segmentsContainer string // container to store the segments (if any) in - root string // the path we are working on if any } // Object describes a swift object @@ -127,6 +128,11 @@ func (f *Fs) String() string { return fmt.Sprintf("Swift container %s path %s", f.container, f.root) } +// Features returns the optional features of this Fs +func (f *Fs) Features() *fs.Features { + return f.features +} + // Pattern to match a swift path var matcher = regexp.MustCompile(`^([^/]*)(.*)$`) @@ -190,6 +196,7 @@ 
func NewFsWithConnection(name, root string, c *swift.Connection) (fs.Fs, error) segmentsContainer: container + "_segments", root: directory, } + f.features = (&fs.Features{ReadMimeType: true, WriteMimeType: true}).Fill(f) // StorageURL overloading storageURL := fs.ConfigFileGet(name, "storage_url") if storageURL != "" { diff --git a/yandex/yandex.go b/yandex/yandex.go index 90d3789bd..8ddd4e220 100644 --- a/yandex/yandex.go +++ b/yandex/yandex.go @@ -65,8 +65,9 @@ func init() { // Fs represents a remote yandex type Fs struct { name string - yd *yandex.Client // client for rest api root string //root path + features *fs.Features // optional features + yd *yandex.Client // client for rest api diskRoot string //root path with "disk:/" container name mkdircache map[string]int } @@ -98,6 +99,11 @@ func (f *Fs) String() string { return fmt.Sprintf("Yandex %s", f.root) } +// Features returns the optional features of this Fs +func (f *Fs) Features() *fs.Features { + return f.features +} + // read access token from ConfigFile string func getAccessToken(name string) (*oauth2.Token, error) { // Read the token from the config file @@ -126,7 +132,7 @@ func NewFs(name, root string) (fs.Fs, error) { f := &Fs{ yd: yandexDisk, } - + f.features = (&fs.Features{ReadMimeType: true, WriteMimeType: true}).Fill(f) f.setRoot(root) // Check to see if the object exists and is a file