diff --git a/backend/alias/alias.go b/backend/alias/alias.go index b90037fb6..8954a3b78 100644 --- a/backend/alias/alias.go +++ b/backend/alias/alias.go @@ -30,7 +30,7 @@ type Options struct { Remote string `config:"remote"` } -// NewFs contstructs an Fs from the path. +// NewFs constructs an Fs from the path. // // The returned Fs is the actual Fs, referenced by remote in the config func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { diff --git a/backend/azureblob/azureblob.go b/backend/azureblob/azureblob.go index 13a98e70e..2f33dc87c 100644 --- a/backend/azureblob/azureblob.go +++ b/backend/azureblob/azureblob.go @@ -307,7 +307,7 @@ func (f *Fs) newPipeline(c azblob.Credential, o azblob.PipelineOptions) pipeline return pipeline.NewPipeline(factories, pipeline.Options{HTTPSender: httpClientFactory(f.client), Log: o.Log}) } -// NewFs contstructs an Fs from the path, container:path +// NewFs constructs an Fs from the path, container:path func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { // Parse config into Options struct opt := new(Options) diff --git a/backend/b2/api/types.go b/backend/b2/api/types.go index feeede569..aff8404ee 100644 --- a/backend/b2/api/types.go +++ b/backend/b2/api/types.go @@ -17,12 +17,12 @@ type Error struct { Message string `json:"message"` // A human-readable message, in English, saying what went wrong. } -// Error statisfies the error interface +// Error satisfies the error interface func (e *Error) Error() string { return fmt.Sprintf("%s (%d %s)", e.Message, e.Status, e.Code) } -// Fatal statisfies the Fatal interface +// Fatal satisfies the Fatal interface // // It indicates which errors should be treated as fatal func (e *Error) Fatal() bool { @@ -100,7 +100,7 @@ func RemoveVersion(remote string) (t Timestamp, newRemote string) { return Timestamp(newT), base[:versionStart] + ext } -// IsZero returns true if the timestamp is unitialised +// IsZero returns true if the timestamp is uninitialized func (t Timestamp) IsZero() bool { return time.Time(t).IsZero() } diff --git a/backend/b2/b2.go b/backend/b2/b2.go index 5f21bb692..604dd18ec 100644 --- a/backend/b2/b2.go +++ b/backend/b2/b2.go @@ -117,7 +117,7 @@ This value should be set no larger than 4.657GiB (== 5GB).`, When uploading large files, chunk the file into this size. Note that these chunks are buffered in memory and there might a maximum of "--transfers" chunks in progress at once. 5,000,000 Bytes is the -minimim size.`, +minimum size.`, Default: defaultChunkSize, Advanced: true, }, { @@ -319,7 +319,7 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) { return } -// NewFs contstructs an Fs from the path, bucket:path +// NewFs constructs an Fs from the path, bucket:path func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { // Parse config into Options struct opt := new(Options) @@ -1459,7 +1459,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio // Content-Type b2/x-auto to automatically set the stored Content-Type // post upload. In the case where a file extension is absent or the // lookup fails, the Content-Type is set to application/octet-stream. The - // Content-Type mappings can be purused here. + // Content-Type mappings can be perused here.
// // X-Bz-Content-Sha1 // required diff --git a/backend/box/api/types.go b/backend/box/api/types.go index c6e96b121..ea140264d 100644 --- a/backend/box/api/types.go +++ b/backend/box/api/types.go @@ -45,7 +45,7 @@ type Error struct { RequestID string `json:"request_id"` } -// Error returns a string for the error and statistifes the error interface +// Error returns a string for the error and satisfies the error interface func (e *Error) Error() string { out := fmt.Sprintf("Error %q (%d)", e.Code, e.Status) if e.Message != "" { @@ -57,7 +57,7 @@ func (e *Error) Error() string { return out } -// Check Error statisfies the error interface +// Check Error satisfies the error interface var _ error = (*Error)(nil) // ItemFields are the fields needed for FileInfo diff --git a/backend/box/box.go b/backend/box/box.go index 1837339aa..3bf242e77 100644 --- a/backend/box/box.go +++ b/backend/box/box.go @@ -171,13 +171,13 @@ var retryErrorCodes = []int{ // shouldRetry returns a boolean as to whether this resp and err // deserve to be retried. It returns the err as a convenience func shouldRetry(resp *http.Response, err error) (bool, error) { - authRety := false + authRetry := false if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 { - authRety = true + authRetry = true fs.Debugf(nil, "Should retry: %v", err) } - return authRety || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err + return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err } // substitute reserved characters for box @@ -530,10 +530,10 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje // // The new object may have been created if an error is returned func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { - exisitingObj, err := f.newObjectWithInfo(src.Remote(), nil) + existingObj, err := f.newObjectWithInfo(src.Remote(), nil) switch err { case nil: - return exisitingObj, exisitingObj.Update(in, src, options...) + return existingObj, existingObj.Update(in, src, options...) case fs.ErrorObjectNotFound: // Not found so create it return f.PutUnchecked(in, src) diff --git a/backend/cache/cache.go b/backend/cache/cache.go index 1b6c40e18..6d6e39361 100644 --- a/backend/cache/cache.go +++ b/backend/cache/cache.go @@ -576,7 +576,7 @@ The slice indices are similar to Python slices: start[:end] start is the 0 based chunk number from the beginning of the file to fetch inclusive. end is 0 based chunk number from the beginning -of the file to fetch exclisive. +of the file to fetch exclusive. Both values can be negative, in which case they count from the back of the file. The value "-5:" represents the last 5 chunks of a file. 
@@ -870,7 +870,7 @@ func (f *Fs) notifyChangeUpstream(remote string, entryType fs.EntryType) { } } -// ChangeNotify can subsribe multiple callers +// ChangeNotify can subscribe multiple callers // this is coupled with the wrapped fs ChangeNotify (if it supports it) // and also notifies other caches (i.e VFS) to clear out whenever something changes func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) { @@ -1549,7 +1549,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { } if srcObj.isTempFile() { - // we check if the feature is stil active + // we check if the feature is still active if f.opt.TempWritePath == "" { fs.Errorf(srcObj, "can't copy - this is a local cached file but this feature is turned off this run") return nil, fs.ErrorCantCopy @@ -1625,7 +1625,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { // if this is a temp object then we perform the changes locally if srcObj.isTempFile() { - // we check if the feature is stil active + // we check if the feature is still active if f.opt.TempWritePath == "" { fs.Errorf(srcObj, "can't move - this is a local cached file but this feature is turned off this run") return nil, fs.ErrorCantMove diff --git a/backend/crypt/cipher.go b/backend/crypt/cipher.go index 655216851..c3dd87768 100644 --- a/backend/crypt/cipher.go +++ b/backend/crypt/cipher.go @@ -748,7 +748,7 @@ func (c *cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) { if !bytes.Equal(readBuf[:fileMagicSize], fileMagicBytes) { return nil, fh.finishAndClose(ErrorEncryptedBadMagic) } - // retreive the nonce + // retrieve the nonce fh.nonce.fromBuf(readBuf[fileMagicSize:]) fh.initialNonce = fh.nonce return fh, nil diff --git a/backend/crypt/crypt.go b/backend/crypt/crypt.go index 23fe1c9c0..d2cc796d0 100644 --- a/backend/crypt/crypt.go +++ b/backend/crypt/crypt.go @@ -122,7 +122,7 @@ func NewCipher(m configmap.Mapper) (Cipher, error) { return newCipherForConfig(opt) } -// NewFs contstructs an Fs from the path, container:path +// NewFs constructs an Fs from the path, container:path func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) { // Parse config into Options struct opt := new(Options) @@ -555,7 +555,7 @@ func (f *Fs) DecryptFileName(encryptedFileName string) (string, error) { } // ComputeHash takes the nonce from o, and encrypts the contents of -// src with it, and calcuates the hash given by HashType on the fly +// src with it, and calculates the hash given by HashType on the fly // // Note that we break lots of encapsulation in this function. 
func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) { diff --git a/backend/drive/drive.go b/backend/drive/drive.go index ddc24b916..3dfa1a51d 100644 --- a/backend/drive/drive.go +++ b/backend/drive/drive.go @@ -482,7 +482,7 @@ func (f *Fs) Features() *fs.Features { return f.features } -// shouldRetry determines whehter a given err rates being retried +// shouldRetry determines whether a given err rates being retried func shouldRetry(err error) (bool, error) { if err == nil { return false, nil } @@ -863,7 +863,7 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) { return } -// NewFs contstructs an Fs from the path, container:path +// NewFs constructs an Fs from the path, container:path func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) { // Parse config into Options struct opt := new(Options) @@ -1483,7 +1483,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) { in := make(chan listREntry, inputBuffer) out := make(chan error, fs.Config.Checkers) list := walk.NewListRHelper(callback) - overfflow := []listREntry{} + overflow := []listREntry{} cb := func(entry fs.DirEntry) error { mu.Lock() @@ -1493,7 +1493,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) { case in <- listREntry{d.ID(), d.Remote()}: wg.Add(1) default: - overfflow = append(overfflow, listREntry{d.ID(), d.Remote()}) + overflow = append(overflow, listREntry{d.ID(), d.Remote()}) } } return list.Add(entry) @@ -1509,18 +1509,18 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) { // wait until the all directories are processed wg.Wait() // if the input channel overflowed add the collected entries to the channel now - for len(overfflow) > 0 { + for len(overflow) > 0 { mu.Lock() - l := len(overfflow) - // only fill half of the channel to prevent entries beeing put into overfflow again + l := len(overflow) + // only fill half of the channel to prevent entries being put into overflow again if l > inputBuffer/2 { l = inputBuffer / 2 } wg.Add(l) - for _, d := range overfflow[:l] { + for _, d := range overflow[:l] { in <- d } - overfflow = overfflow[l:] + overflow = overflow[l:] mu.Unlock() // wait again for the completion of all directories @@ -1711,14 +1711,14 @@ func (f *Fs) MergeDirs(dirs []fs.Directory) error { return shouldRetry(err) }) if err != nil { - return errors.Wrapf(err, "MergDirs move failed on %q in %v", info.Name, srcDir) + return errors.Wrapf(err, "MergeDirs move failed on %q in %v", info.Name, srcDir) } } // rmdir (into trash) the now empty source directory fs.Infof(srcDir, "removing empty directory") err = f.rmdir(srcDir.ID(), true) if err != nil { - return errors.Wrapf(err, "MergDirs move failed to rmdir %q", srcDir) + return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir) } } return nil @@ -2137,7 +2137,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { // ChangeNotify calls the passed function with a path that has had changes. // If the implementation uses polling, it should adhere to the given interval. // -// Automatically restarts itself in case of unexpected behaviour of the remote. +// Automatically restarts itself in case of unexpected behavior of the remote. // // Close the returned channel to stop being notified.
func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) { diff --git a/backend/drive/upload.go b/backend/drive/upload.go index a6fb65aaa..b5dc7bbad 100644 --- a/backend/drive/upload.go +++ b/backend/drive/upload.go @@ -185,7 +185,7 @@ func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunk // been 200 OK. // // So parse the response out of the body. We aren't expecting - // any other 2xx codes, so we parse it unconditionaly on + // any other 2xx codes, so we parse it unconditionally on // StatusCode if err = json.NewDecoder(res.Body).Decode(&rx.ret); err != nil { return 598, err diff --git a/backend/dropbox/dropbox.go b/backend/dropbox/dropbox.go index c08d2638e..d30fe4ef3 100644 --- a/backend/dropbox/dropbox.go +++ b/backend/dropbox/dropbox.go @@ -213,7 +213,7 @@ func shouldRetry(err error) (bool, error) { } return true, err } - // Keep old behaviour for backward compatibility + // Keep old behavior for backward compatibility if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") { return true, err } @@ -239,7 +239,7 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) return } -// NewFs contstructs an Fs from the path, container:path +// NewFs constructs an Fs from the path, container:path func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { // Parse config into Options struct opt := new(Options) diff --git a/backend/ftp/ftp.go b/backend/ftp/ftp.go index fafe7bc53..97cd1ff3d 100644 --- a/backend/ftp/ftp.go +++ b/backend/ftp/ftp.go @@ -166,7 +166,7 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) { f.poolMu.Unlock() } -// NewFs contstructs an Fs from the path, container:path +// NewFs constructs an Fs from the path, container:path func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) { // defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err) // Parse config into Options struct diff --git a/backend/googlecloudstorage/googlecloudstorage.go b/backend/googlecloudstorage/googlecloudstorage.go index 2bdddede4..5e7b65015 100644 --- a/backend/googlecloudstorage/googlecloudstorage.go +++ b/backend/googlecloudstorage/googlecloudstorage.go @@ -300,7 +300,7 @@ func (f *Fs) Features() *fs.Features { return f.features } -// shouldRetry determines whehter a given err rates being retried +// shouldRetry determines whether a given err rates being retried func shouldRetry(err error) (again bool, errOut error) { again = false if err != nil { @@ -348,7 +348,7 @@ func getServiceAccountClient(credentialsData []byte) (*http.Client, error) { return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil } -// NewFs contstructs an Fs from the path, bucket:path +// NewFs constructs an Fs from the path, bucket:path func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { var oAuthClient *http.Client diff --git a/backend/http/http.go b/backend/http/http.go index e927bea33..c155f31fc 100644 --- a/backend/http/http.go +++ b/backend/http/http.go @@ -251,7 +251,7 @@ func parseName(base *url.URL, name string) (string, error) { } // calculate the name relative to the base name = u.Path[len(base.Path):] - // musn't be empty + // mustn't be empty if name == "" { return "", errNameIsEmpty } diff --git a/backend/jottacloud/jottacloud.go b/backend/jottacloud/jottacloud.go index 0dc33fd6b..99ab163aa 100644 --- a/backend/jottacloud/jottacloud.go +++ 
b/backend/jottacloud/jottacloud.go @@ -103,7 +103,7 @@ func init() { var jsonToken api.TokenJSON resp, err := srv.CallJSON(&opts, nil, &jsonToken) if err != nil { - // if 2fa is enabled the first request is expected to fail. we'lls do another request with the 2fa code as an additional http header + // if 2fa is enabled the first request is expected to fail. We will do another request with the 2fa code as an additional http header if resp != nil { if resp.Header.Get("X-JottaCloud-OTP") == "required; SMS" { fmt.Printf("This account has 2 factor authentication enabled you will receive a verification code via SMS.\n") @@ -163,7 +163,7 @@ func init() { Advanced: true, }, { Name: "upload_resume_limit", - Help: "Files bigger than this can be resumed if the upload failes.", + Help: "Files bigger than this can be resumed if the upload fails.", Default: fs.SizeSuffix(10 * 1024 * 1024), Advanced: true, }}, @@ -361,7 +361,7 @@ func grantTypeFilter(req *http.Request) { } _ = req.Body.Close() - // make the refesh token upper case + // make the refresh token upper case refreshBody = []byte(strings.Replace(string(refreshBody), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1)) // set the new ReadCloser (with a dummy Close()) @@ -769,7 +769,7 @@ func (f *Fs) Purge() error { return f.purgeCheck("", false) } -// copyOrMoves copys or moves directories or files depending on the mthod parameter +// copyOrMoves copies or moves directories or files depending on the method parameter func (f *Fs) copyOrMove(method, src, dest string) (info *api.JottaFile, err error) { opts := rest.Opts{ Method: "POST", @@ -1080,7 +1080,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader, cleanup func(), err error) { // we need a MD5 md5Hasher := md5.New() - // use the teeReader to write to the local file AND caclulate the MD5 while doing so + // use the teeReader to write to the local file AND calculate the MD5 while doing so teeReader := io.TeeReader(in, md5Hasher) // nothing to clean up by default diff --git a/backend/jottacloud/replace.go b/backend/jottacloud/replace.go index 92f0ce051..698726036 100644 --- a/backend/jottacloud/replace.go +++ b/backend/jottacloud/replace.go @@ -2,7 +2,7 @@ Translate file names for JottaCloud adapted from OneDrive -The following characters are JottaClous reserved characters, and can't +The following characters are JottaCloud reserved characters, and can't be used in JottaCloud folder and file names. jottacloud = "/" / "\" / "*" / "<" / ">" / "?" / "!" / "&" / ":" / ";" / "|" / "#" / "%" / """ / "'" / "." / "~" diff --git a/backend/local/local.go b/backend/local/local.go index 039d5d86e..409b2d48c 100644 --- a/backend/local/local.go +++ b/backend/local/local.go @@ -225,10 +225,10 @@ func (f *Fs) Features() *fs.Features { return f.features } -// caseInsenstive returns whether the remote is case insensitive or not +// caseInsensitive returns whether the remote is case insensitive or not func (f *Fs) caseInsensitive() bool { // FIXME not entirely accurate since you can have case - // sensitive Fses on darwin and case insenstive Fses on linux. + // sensitive Fses on darwin and case insensitive Fses on linux. // Should probably check but that would involve creating a // file in the remote to be most accurate which probably isn't // desirable.
@@ -288,7 +288,7 @@ func (f *Fs) newObjectWithInfo(remote, dstPath string, info os.FileInfo) (fs.Obj } return nil, err } - // Handle the odd case, that a symlink was specfied by name without the link suffix + // Handle the odd case, that a symlink was specified by name without the link suffix if o.fs.opt.TranslateSymlinks && o.mode&os.ModeSymlink != 0 && !o.translatedLink { return nil, fs.ErrorObjectNotFound } @@ -958,7 +958,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio if o.translatedLink { if err == nil { - // Remove any current symlink or file, if one exsits + // Remove any current symlink or file, if one exists if _, err := os.Lstat(o.path); err == nil { if removeErr := os.Remove(o.path); removeErr != nil { fs.Errorf(o, "Failed to remove previous file: %v", removeErr) diff --git a/backend/mega/mega.go b/backend/mega/mega.go index 1e43fed07..e61dd319c 100644 --- a/backend/mega/mega.go +++ b/backend/mega/mega.go @@ -497,7 +497,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { // Creates from the parameters passed in a half finished Object which // must have setMetaData called on it // -// Returns the dirNode, obect, leaf and error +// Returns the dirNode, object, leaf and error // // Used to create new objects func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object, dirNode *mega.Node, leaf string, err error) { @@ -523,10 +523,10 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje // This will create a duplicate if we upload a new file without // checking to see if there is one already - use Put() for that. func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { - exisitingObj, err := f.newObjectWithInfo(src.Remote(), nil) + existingObj, err := f.newObjectWithInfo(src.Remote(), nil) switch err { case nil: - return exisitingObj, exisitingObj.Update(in, src, options...) + return existingObj, existingObj.Update(in, src, options...) 
case fs.ErrorObjectNotFound: // Not found so create it return f.PutUnchecked(in, src) @@ -847,14 +847,14 @@ func (f *Fs) MergeDirs(dirs []fs.Directory) error { return shouldRetry(err) }) if err != nil { - return errors.Wrapf(err, "MergDirs move failed on %q in %v", info.GetName(), srcDir) + return errors.Wrapf(err, "MergeDirs move failed on %q in %v", info.GetName(), srcDir) } } // rmdir (into trash) the now empty source directory fs.Infof(srcDir, "removing empty directory") err = f.deleteNode(srcDirNode) if err != nil { - return errors.Wrapf(err, "MergDirs move failed to rmdir %q", srcDir) + return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir) } } return nil @@ -1129,7 +1129,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio return errors.Wrap(err, "failed to finish upload") } - // If the upload succeded and the original object existed, then delete it + // If the upload succeeded and the original object existed, then delete it if o.info != nil { err = o.fs.deleteNode(o.info) if err != nil { diff --git a/backend/onedrive/api/types.go b/backend/onedrive/api/types.go index bf676cf8e..eff661855 100644 --- a/backend/onedrive/api/types.go +++ b/backend/onedrive/api/types.go @@ -25,7 +25,7 @@ type Error struct { } `json:"error"` } -// Error returns a string for the error and statistifes the error interface +// Error returns a string for the error and satisfies the error interface func (e *Error) Error() string { out := e.ErrorInfo.Code if e.ErrorInfo.InnerError.Code != "" { @@ -35,7 +35,7 @@ func (e *Error) Error() string { return out } -// Check Error statisfies the error interface +// Check Error satisfies the error interface var _ error = (*Error)(nil) // Identity represents an identity of an actor. For example, and actor @@ -295,9 +295,9 @@ func (i *Item) GetID() string { return i.ID } -// GetDriveID returns a normalized ParentReferance of the item +// GetDriveID returns a normalized ParentReference of the item func (i *Item) GetDriveID() string { - return i.GetParentReferance().DriveID + return i.GetParentReference().DriveID } // GetName returns a normalized Name of the item @@ -398,8 +398,8 @@ func (i *Item) GetLastModifiedDateTime() Timestamp { return i.LastModifiedDateTime } -// GetParentReferance returns a normalized ParentReferance of the item -func (i *Item) GetParentReferance() *ItemReference { +// GetParentReference returns a normalized ParentReference of the item +func (i *Item) GetParentReference() *ItemReference { if i.IsRemote() && i.ParentReference == nil { return i.RemoteItem.ParentReference } diff --git a/backend/onedrive/onedrive.go b/backend/onedrive/onedrive.go index 242d01e06..b54b3ce7d 100644 --- a/backend/onedrive/onedrive.go +++ b/backend/onedrive/onedrive.go @@ -324,13 +324,13 @@ var retryErrorCodes = []int{ // shouldRetry returns a boolean as to whether this resp and err // deserve to be retried. 
It returns the err as a convenience func shouldRetry(resp *http.Response, err error) (bool, error) { - authRety := false + authRetry := false if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 { - authRety = true + authRetry = true fs.Debugf(nil, "Should retry: %v", err) } - return authRety || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err + return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err } // readMetaDataForPathRelativeToID reads the metadata for a path relative to an item that is addressed by its normalized ID. diff --git a/backend/opendrive/opendrive.go b/backend/opendrive/opendrive.go index 994e91579..5d8cc53a2 100644 --- a/backend/opendrive/opendrive.go +++ b/backend/opendrive/opendrive.go @@ -119,7 +119,7 @@ func (f *Fs) DirCacheFlush() { f.dirCache.ResetRoot() } -// NewFs contstructs an Fs from the path, bucket:path +// NewFs constructs an Fs from the path, bucket:path func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { // Parse config into Options struct opt := new(Options) diff --git a/backend/opendrive/types.go b/backend/opendrive/types.go index 1bd732857..2cbcf92c1 100644 --- a/backend/opendrive/types.go +++ b/backend/opendrive/types.go @@ -13,7 +13,7 @@ type Error struct { } `json:"error"` } -// Error statisfies the error interface +// Error satisfies the error interface func (e *Error) Error() string { return fmt.Sprintf("%s (Error %d)", e.Info.Message, e.Info.Code) } diff --git a/backend/pcloud/api/types.go b/backend/pcloud/api/types.go index 408be792e..17baf8904 100644 --- a/backend/pcloud/api/types.go +++ b/backend/pcloud/api/types.go @@ -41,7 +41,7 @@ type Error struct { ErrorString string `json:"error"` } -// Error returns a string for the error and statistifes the error interface +// Error returns a string for the error and satisfies the error interface func (e *Error) Error() string { return fmt.Sprintf("pcloud error: %s (%d)", e.ErrorString, e.Result) } @@ -58,7 +58,7 @@ func (e *Error) Update(err error) error { return e } -// Check Error statisfies the error interface +// Check Error satisfies the error interface var _ error = (*Error)(nil) // Item describes a folder or a file as returned by Get Folder Items and others diff --git a/backend/pcloud/pcloud.go b/backend/pcloud/pcloud.go index fdd57b02b..7a75932de 100644 --- a/backend/pcloud/pcloud.go +++ b/backend/pcloud/pcloud.go @@ -385,7 +385,7 @@ func fileIDtoNumber(fileID string) string { if len(fileID) > 0 && fileID[0] == 'f' { return fileID[1:] } - fs.Debugf(nil, "Invalid filee id %q", fileID) + fs.Debugf(nil, "Invalid file id %q", fileID) return fileID } diff --git a/backend/qingstor/qingstor.go b/backend/qingstor/qingstor.go index 57b1b4408..ae684a5af 100644 --- a/backend/qingstor/qingstor.go +++ b/backend/qingstor/qingstor.go @@ -449,7 +449,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { } _, err = bucketInit.PutObject(key, &req) if err != nil { - fs.Debugf(f, "Copied Faild, API Error: %v", err) + fs.Debugf(f, "Copy Failed, API Error: %v", err) return nil, err } return f.NewObject(remote) @@ -756,7 +756,7 @@ func (f *Fs) Mkdir(dir string) error { } switch *statistics.Status { case "deleted": - fs.Debugf(f, "Wiat for qingstor sync bucket status, retries: %d", retries) + fs.Debugf(f, "Wait for qingstor sync bucket status, retries: %d", retries) time.Sleep(time.Second * 1) 
retries++ continue @@ -875,7 +875,7 @@ func (o *Object) readMetaData() (err error) { fs.Debugf(o, "Read metadata of key: %s", key) resp, err := bucketInit.HeadObject(key, &qs.HeadObjectInput{}) if err != nil { - fs.Debugf(o, "Read metadata faild, API Error: %v", err) + fs.Debugf(o, "Read metadata failed, API Error: %v", err) if e, ok := err.(*qsErr.QingStorError); ok { if e.StatusCode == http.StatusNotFound { return fs.ErrorObjectNotFound diff --git a/backend/qingstor/upload.go b/backend/qingstor/upload.go index 1359b14e0..2ff2220aa 100644 --- a/backend/qingstor/upload.go +++ b/backend/qingstor/upload.go @@ -163,7 +163,7 @@ func (u *uploader) singlePartUpload(buf io.Reader, size int64) error { _, err := bucketInit.PutObject(u.cfg.key, &req) if err == nil { - fs.Debugf(u, "Upload single objcet finished") + fs.Debugf(u, "Upload single object finished") } return err } diff --git a/backend/s3/s3.go b/backend/s3/s3.go index 6f8bfe669..b46768408 100644 --- a/backend/s3/s3.go +++ b/backend/s3/s3.go @@ -237,10 +237,10 @@ func init() { Help: "EU Cross Region Amsterdam Private Endpoint", }, { Value: "s3.eu-gb.objectstorage.softlayer.net", - Help: "Great Britan Endpoint", + Help: "Great Britain Endpoint", }, { Value: "s3.eu-gb.objectstorage.service.networklayer.com", - Help: "Great Britan Private Endpoint", + Help: "Great Britain Private Endpoint", }, { Value: "s3.ap-geo.objectstorage.softlayer.net", Help: "APAC Cross Regional Endpoint", @@ -450,7 +450,7 @@ func init() { Help: "US East Region Flex", }, { Value: "us-south-standard", - Help: "US Sout hRegion Standard", + Help: "US South Region Standard", }, { Value: "us-south-vault", Help: "US South Region Vault", @@ -474,16 +474,16 @@ func init() { Help: "EU Cross Region Flex", }, { Value: "eu-gb-standard", - Help: "Great Britan Standard", + Help: "Great Britain Standard", }, { Value: "eu-gb-vault", - Help: "Great Britan Vault", + Help: "Great Britain Vault", }, { Value: "eu-gb-cold", - Help: "Great Britan Cold", + Help: "Great Britain Cold", }, { Value: "eu-gb-flex", - Help: "Great Britan Flex", + Help: "Great Britain Flex", }, { Value: "ap-standard", Help: "APAC Standard", @@ -842,7 +842,7 @@ var retryErrorCodes = []int{ func (f *Fs) shouldRetry(err error) (bool, error) { // If this is an awserr object, try and extract more useful information to determine if we should retry if awsError, ok := err.(awserr.Error); ok { - // Simple case, check the original embedded error in case it's generically retriable + // Simple case, check the original embedded error in case it's generically retryable if fserrors.ShouldRetry(awsError.OrigErr()) { return true, err } diff --git a/backend/swift/swift.go b/backend/swift/swift.go index f56cb690d..aa72adc57 100644 --- a/backend/swift/swift.go +++ b/backend/swift/swift.go @@ -430,7 +430,7 @@ func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, n return f, nil } -// NewFs contstructs an Fs from the path, container:path +// NewFs constructs an Fs from the path, container:path func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { // Parse config into Options struct opt := new(Options) diff --git a/backend/union/union.go b/backend/union/union.go index fa2dc37c1..bca6a0b78 100644 --- a/backend/union/union.go +++ b/backend/union/union.go @@ -177,8 +177,8 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { // At least one value will be written to the channel, // specifying the initial value and updated values might // follow. A 0 Duration should pause the polling. 
-// The ChangeNotify implemantion must empty the channel -// regulary. When the channel gets closed, the implemantion +// The ChangeNotify implementation must empty the channel +// regularly. When the channel gets closed, the implementation // should stop polling and release resources. func (f *Fs) ChangeNotify(fn func(string, fs.EntryType), ch <-chan time.Duration) { var remoteChans []chan time.Duration diff --git a/backend/webdav/api/types.go b/backend/webdav/api/types.go index f9cf2d349..eab79dccf 100644 --- a/backend/webdav/api/types.go +++ b/backend/webdav/api/types.go @@ -124,7 +124,7 @@ type PropValue struct { Value string `xml:",chardata"` } -// Error is used to desribe webdav errors +// Error is used to describe webdav errors // // // Sabre\DAV\Exception\NotFound @@ -137,7 +137,7 @@ type Error struct { StatusCode int } -// Error returns a string for the error and statistifes the error interface +// Error returns a string for the error and satisfies the error interface func (e *Error) Error() string { var out []string if e.Message != "" { diff --git a/backend/webdav/odrvcookie/fetch.go b/backend/webdav/odrvcookie/fetch.go index 2a7a3ee27..65509c455 100644 --- a/backend/webdav/odrvcookie/fetch.go +++ b/backend/webdav/odrvcookie/fetch.go @@ -102,7 +102,7 @@ func (ca *CookieAuth) Cookies() (*CookieResponse, error) { func (ca *CookieAuth) getSPCookie(conf *SuccessResponse) (*CookieResponse, error) { spRoot, err := url.Parse(ca.endpoint) if err != nil { - return nil, errors.Wrap(err, "Error while contructing endpoint URL") + return nil, errors.Wrap(err, "Error while constructing endpoint URL") } u, err := url.Parse("https://" + spRoot.Host + "/_forms/default.aspx?wa=wsignin1.0") @@ -121,7 +121,7 @@ func (ca *CookieAuth) getSPCookie(conf *SuccessResponse) (*CookieResponse, error Jar: jar, } - // Send the previously aquired Token as a Post parameter + // Send the previously acquired Token as a Post parameter if _, err = client.Post(u.String(), "text/xml", strings.NewReader(conf.Succ.Token)); err != nil { return nil, errors.Wrap(err, "Error while grabbing cookies from endpoint: %v") } diff --git a/backend/webdav/webdav.go b/backend/webdav/webdav.go index f77a37ccd..54e11f505 100644 --- a/backend/webdav/webdav.go +++ b/backend/webdav/webdav.go @@ -249,7 +249,7 @@ func errorHandler(resp *http.Response) error { return errResponse } -// addShlash makes sure s is terminated with a / if non empty +// addSlash makes sure s is terminated with a / if non empty func addSlash(s string) string { if s != "" && !strings.HasSuffix(s, "/") { s += "/" diff --git a/backend/yandex/api/types.go b/backend/yandex/api/types.go index f833517e7..b3d7789ab 100644 --- a/backend/yandex/api/types.go +++ b/backend/yandex/api/types.go @@ -56,7 +56,7 @@ type AsyncInfo struct { Templated bool `json:"templated"` } -// AsyncStatus is returned when requesting the status of an async operations. Possble values in-progress, success, failure +// AsyncStatus is returned when requesting the status of an async operations. 
Possible values in-progress, success, failure type AsyncStatus struct { Status string `json:"status"` } diff --git a/backend/yandex/yandex.go b/backend/yandex/yandex.go index b48463fff..624c425e6 100644 --- a/backend/yandex/yandex.go +++ b/backend/yandex/yandex.go @@ -634,7 +634,7 @@ func (f *Fs) Purge() error { return f.purgeCheck("", false) } -// copyOrMoves copys or moves directories or files depending on the mthod parameter +// copyOrMoves copies or moves directories or files depending on the method parameter func (f *Fs) copyOrMove(method, src, dst string, overwrite bool) (err error) { opts := rest.Opts{ Method: "POST", @@ -1107,7 +1107,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio return err } - //if file uploaded sucessfully then return metadata + //if file uploaded successfully then return metadata o.modTime = modTime o.md5sum = "" // according to unit tests after put the md5 is empty. o.size = int64(in1.BytesRead()) // better solution o.readMetaData() ?