From 4aee9622339ca1fc4b202400e9bf733d56750945 Mon Sep 17 00:00:00 2001 From: Martin Michlmayr Date: Wed, 20 May 2020 18:39:20 +0800 Subject: [PATCH] doc: fix typos throughout docs and code --- CONTRIBUTING.md | 4 ++-- backend/amazonclouddrive/amazonclouddrive.go | 4 ++-- backend/azureblob/azureblob.go | 4 ++-- backend/box/box.go | 2 +- backend/crypt/cipher.go | 2 +- backend/crypt/cipher_test.go | 2 +- backend/crypt/crypt.go | 4 ++-- backend/crypt/crypt_internal_test.go | 2 +- backend/drive/drive.go | 24 ++++++++++---------- backend/ftp/ftp.go | 2 +- backend/googlephotos/api/types.go | 2 +- backend/googlephotos/pattern.go | 2 +- backend/hubic/auth.go | 2 +- backend/jottacloud/jottacloud.go | 10 ++++---- backend/koofr/koofr.go | 2 +- backend/mailru/mailru.go | 2 +- backend/mega/mega.go | 2 +- backend/onedrive/api/types.go | 6 ++--- backend/onedrive/onedrive.go | 4 ++-- backend/opendrive/opendrive.go | 2 +- backend/opendrive/types.go | 10 ++++---- backend/pcloud/pcloud.go | 2 +- backend/premiumizeme/api/types.go | 2 +- backend/qingstor/qingstor.go | 2 +- backend/qingstor/upload.go | 10 ++++---- backend/s3/s3.go | 2 +- backend/seafile/seafile.go | 2 +- backend/seafile/webapi.go | 2 +- backend/sftp/sftp.go | 4 ++-- backend/sharefile/api/types.go | 2 +- backend/swift/swift.go | 4 ++-- backend/tardigrade/fs.go | 2 +- backend/union/errors.go | 2 +- backend/union/policy/all.go | 2 +- backend/union/policy/epall.go | 4 ++-- backend/union/policy/epff.go | 6 ++--- backend/union/policy/eplfs.go | 6 ++--- backend/union/policy/eplno.go | 6 ++--- backend/union/policy/eplus.go | 6 ++--- backend/union/policy/epmfs.go | 6 ++--- backend/union/policy/eprand.go | 6 ++--- backend/union/policy/newest.go | 8 +++---- backend/union/policy/policy.go | 6 ++--- backend/union/policy/rand.go | 6 ++--- backend/union/upstream/upstream.go | 6 ++--- backend/webdav/odrvcookie/fetch.go | 4 ++-- backend/yandex/yandex.go | 6 ++--- cmd/cmount/mount.go | 2 +- cmd/copyurl/copyurl.go | 2 +- cmd/mount/mount.go 
| 2 +- cmd/mount2/mount.go | 2 +- cmd/mountlib/rc_test.go | 2 +- cmd/serve/dlna/cds.go | 2 +- cmd/serve/dlna/dlna.go | 2 +- cmd/serve/httplib/serve/dir.go | 4 ++-- cmd/serve/proxy/proxy.go | 2 +- cmd/serve/sftp/connection.go | 2 +- cmd/tree/tree.go | 2 +- docs/content/cache.md | 2 +- docs/content/changelog.md | 20 ++++++++-------- docs/content/jottacloud.md | 2 +- fs/accounting/accounting.go | 2 +- fs/accounting/stats_groups.go | 2 +- fs/cache/cache.go | 6 ++--- fs/chunkedreader/chunkedreader.go | 4 ++-- fs/config.go | 2 +- fs/filter/filter.go | 2 +- fs/fs.go | 4 ++-- fs/fserrors/error.go | 2 +- fs/fshttp/http.go | 2 +- fs/log/caller_hook.go | 4 ++-- fs/operations/rc.go | 2 +- fs/operations/reopen_test.go | 2 +- fs/options.go | 2 +- fs/rc/cache.go | 4 ++-- fs/rc/jobs/job.go | 2 +- fs/rc/webgui.go | 2 +- fs/sync/pipe.go | 2 +- fstest/fstests/fstests.go | 2 +- fstest/mockobject/mockobject.go | 2 +- fstest/test_all/test_all.go | 2 +- lib/atexit/atexit.go | 2 +- lib/bucket/bucket.go | 2 +- lib/cache/cache.go | 4 ++-- lib/encoder/encoder.go | 2 +- lib/encoder/internal/gen/main.go | 2 +- lib/readers/repeatable.go | 4 ++-- lib/readers/repeatable_test.go | 2 +- lib/rest/rest.go | 4 ++-- vfs/file.go | 2 +- vfs/file_test.go | 2 +- vfs/read.go | 4 ++-- vfs/read_write.go | 4 ++-- vfs/vfscache/vfscache_test.go | 2 +- vfs/vfscommon/options.go | 2 +- vfs/write.go | 2 +- 96 files changed, 174 insertions(+), 174 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d3be5d6ff..f0cd2d23a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -158,7 +158,7 @@ with modules beneath. 
* fserrors - rclone specific error handling * fshttp - http handling for rclone * fspath - path handling for rclone - * hash - defines rclones hash types and functions + * hash - defines rclone's hash types and functions * list - list a remote * log - logging facilities * march - iterates directories in lock step @@ -295,7 +295,7 @@ If you need to update a dependency then run GO111MODULE=on go get -u github.com/pkg/errors GO111MODULE=on go mod vendor -Check in in a single commit as above. +Check in a single commit as above. ## Updating all the dependencies ## diff --git a/backend/amazonclouddrive/amazonclouddrive.go b/backend/amazonclouddrive/amazonclouddrive.go index 7d026eb95..01b215a4c 100644 --- a/backend/amazonclouddrive/amazonclouddrive.go +++ b/backend/amazonclouddrive/amazonclouddrive.go @@ -169,7 +169,7 @@ type Fs struct { tokenRenewer *oauthutil.Renew // renew the token on expiry } -// Object describes a acd object +// Object describes an acd object // // Will definitely have info but maybe not meta type Object struct { @@ -229,7 +229,7 @@ func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) { } // Work around receiving this error sporadically on authentication // - // HTTP code 403: "403 Forbidden", reponse body: {"message":"Authorization header requires 'Credential' parameter. Authorization header requires 'Signature' parameter. Authorization header requires 'SignedHeaders' parameter. Authorization header requires existence of either a 'X-Amz-Date' or a 'Date' header. Authorization=Bearer"} + // HTTP code 403: "403 Forbidden", response body: {"message":"Authorization header requires 'Credential' parameter. Authorization header requires 'Signature' parameter. Authorization header requires 'SignedHeaders' parameter. Authorization header requires existence of either a 'X-Amz-Date' or a 'Date' header. 
Authorization=Bearer"} if resp.StatusCode == 403 && strings.Contains(err.Error(), "Authorization header requires") { fs.Debugf(f, "403 \"Authorization header requires...\" error received - retry") return true, err diff --git a/backend/azureblob/azureblob.go b/backend/azureblob/azureblob.go index 9a6568b42..ea760e0b5 100644 --- a/backend/azureblob/azureblob.go +++ b/backend/azureblob/azureblob.go @@ -201,7 +201,7 @@ type Fs struct { pool *pool.Pool // memory pool } -// Object describes a azure object +// Object describes an azure object type Object struct { fs *Fs // what this object is part of remote string // The remote path @@ -338,7 +338,7 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) { } // httpClientFactory creates a Factory object that sends HTTP requests -// to a rclone's http.Client. +// to an rclone's http.Client. // // copied from azblob.newDefaultHTTPClientFactory func httpClientFactory(client *http.Client) pipeline.Factory { diff --git a/backend/box/box.go b/backend/box/box.go index ce9c6f38c..279fc55db 100644 --- a/backend/box/box.go +++ b/backend/box/box.go @@ -296,7 +296,7 @@ func (f *Fs) Features() *fs.Features { return f.features } -// parsePath parses an box 'url' +// parsePath parses a box 'url' func parsePath(path string) (root string) { root = strings.Trim(path, "/") return diff --git a/backend/crypt/cipher.go b/backend/crypt/cipher.go index 5213a0c67..810225fd5 100644 --- a/backend/crypt/cipher.go +++ b/backend/crypt/cipher.go @@ -217,7 +217,7 @@ func decodeFileName(in string) ([]byte, error) { // 2003 paper "A Parallelizable Enciphering Mode" by Halevi and // Rogaway. // -// This makes for determinstic encryption which is what we want - the +// This makes for deterministic encryption which is what we want - the // same filename must encrypt to the same thing. 
// // This means that diff --git a/backend/crypt/cipher_test.go b/backend/crypt/cipher_test.go index f517dcdbd..ff9bb23c5 100644 --- a/backend/crypt/cipher_test.go +++ b/backend/crypt/cipher_test.go @@ -929,7 +929,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) { assert.Equal(t, 0, n) } - // Now try decoding it with a open/seek + // Now try decoding it with an open/seek for _, offset := range trials { for _, limit := range limits { if offset+limit > len(plaintext) { diff --git a/backend/crypt/crypt.go b/backend/crypt/crypt.go index c34e3ff66..16735ac57 100644 --- a/backend/crypt/crypt.go +++ b/backend/crypt/crypt.go @@ -241,7 +241,7 @@ func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) { *entries = append(*entries, f.newObject(obj)) } -// Encrypt an directory file name to entries. +// Encrypt a directory file name to entries. func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) { remote := dir.Remote() decryptedRemote, err := f.cipher.DecryptDirName(remote) @@ -943,7 +943,7 @@ func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) { if srcObj, ok = o.ObjectInfo.(fs.Object); ok { // Prefer direct interface assertion } else if do, ok := o.ObjectInfo.(fs.ObjectUnWrapper); ok { - // Otherwise likely is a operations.OverrideRemote + // Otherwise likely is an operations.OverrideRemote srcObj = do.UnWrap() } else { return "", nil diff --git a/backend/crypt/crypt_internal_test.go b/backend/crypt/crypt_internal_test.go index 33b093835..38e064977 100644 --- a/backend/crypt/crypt_internal_test.go +++ b/backend/crypt/crypt_internal_test.go @@ -82,7 +82,7 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) { var oi fs.ObjectInfo = obj if wrap { - // wrap the object in a fs.ObjectUnwrapper if required + // wrap the object in an fs.ObjectUnwrapper if required oi = testWrapper{oi} } diff --git a/backend/drive/drive.go b/backend/drive/drive.go index fdad87af0..edc5df5da 100755 --- a/backend/drive/drive.go +++ 
b/backend/drive/drive.go @@ -1220,7 +1220,7 @@ func (f *Fs) getFileFields() (fields googleapi.Field) { return fields } -// newRegularObject creates a fs.Object for a normal drive.File +// newRegularObject creates an fs.Object for a normal drive.File func (f *Fs) newRegularObject(remote string, info *drive.File) fs.Object { // wipe checksum if SkipChecksumGphotos and file is type Photo or Video if f.opt.SkipChecksumGphotos { @@ -1239,7 +1239,7 @@ func (f *Fs) newRegularObject(remote string, info *drive.File) fs.Object { } } -// newDocumentObject creates a fs.Object for a google docs drive.File +// newDocumentObject creates an fs.Object for a google docs drive.File func (f *Fs) newDocumentObject(remote string, info *drive.File, extension, exportMimeType string) (fs.Object, error) { mediaType, _, err := mime.ParseMediaType(exportMimeType) if err != nil { @@ -1270,7 +1270,7 @@ func (f *Fs) newDocumentObject(remote string, info *drive.File, extension, expor }, nil } -// newLinkObject creates a fs.Object that represents a link a google docs drive.File +// newLinkObject creates an fs.Object that represents a link a google docs drive.File func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMimeType string) (fs.Object, error) { t := linkTemplate(exportMimeType) if t == nil { @@ -1296,9 +1296,9 @@ func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMim }, nil } -// newObjectWithInfo creates a fs.Object for any drive.File +// newObjectWithInfo creates an fs.Object for any drive.File // -// When the drive.File cannot be represented as a fs.Object it will return (nil, nil). +// When the drive.File cannot be represented as an fs.Object it will return (nil, nil). 
func (f *Fs) newObjectWithInfo(remote string, info *drive.File) (fs.Object, error) { // If item has MD5 sum or a length it is a file stored on drive if info.Md5Checksum != "" || info.Size > 0 { @@ -1309,9 +1309,9 @@ func (f *Fs) newObjectWithInfo(remote string, info *drive.File) (fs.Object, erro return f.newObjectWithExportInfo(remote, info, extension, exportName, exportMimeType, isDocument) } -// newObjectWithExportInfo creates a fs.Object for any drive.File and the result of findExportFormat +// newObjectWithExportInfo creates an fs.Object for any drive.File and the result of findExportFormat // -// When the drive.File cannot be represented as a fs.Object it will return (nil, nil). +// When the drive.File cannot be represented as an fs.Object it will return (nil, nil). func (f *Fs) newObjectWithExportInfo( remote string, info *drive.File, extension, exportName, exportMimeType string, isDocument bool) (o fs.Object, err error) { @@ -1629,7 +1629,7 @@ func (s listRSlices) Less(i, j int) bool { return s.dirs[i] < s.dirs[j] } -// listRRunner will read dirIDs from the in channel, perform the file listing an call cb with each DirEntry. +// listRRunner will read dirIDs from the in channel, perform the file listing and call cb with each DirEntry. // // In each cycle it will read up to grouping entries from the in channel without blocking. // If an error occurs it will be send to the out channel and then return. 
Once the in channel is closed, @@ -1788,7 +1788,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) ( for len(overflow) > 0 { mu.Lock() l := len(overflow) - // only fill half of the channel to prevent entries beeing put into overflow again + // only fill half of the channel to prevent entries being put into overflow again if l > inputBuffer/2 { l = inputBuffer / 2 } @@ -1922,8 +1922,8 @@ func (f *Fs) resolveShortcut(item *drive.File) (newItem *drive.File, err error) return newItem, nil } -// itemToDirEntry converts a drive.File to a fs.DirEntry. -// When the drive.File cannot be represented as a fs.DirEntry +// itemToDirEntry converts a drive.File to an fs.DirEntry. +// When the drive.File cannot be represented as an fs.DirEntry // (nil, nil) is returned. func (f *Fs) itemToDirEntry(remote string, item *drive.File) (entry fs.DirEntry, err error) { switch { @@ -3144,7 +3144,7 @@ func (o *baseObject) httpResponse(ctx context.Context, url, method string, optio return req, res, nil } -// openDocumentFile represents an documentObject open for reading. +// openDocumentFile represents a documentObject open for reading. // Updates the object size after read successfully. 
type openDocumentFile struct { o *documentObject // Object we are reading for diff --git a/backend/ftp/ftp.go b/backend/ftp/ftp.go index c44c01134..8119b19ca 100644 --- a/backend/ftp/ftp.go +++ b/backend/ftp/ftp.go @@ -72,7 +72,7 @@ func init() { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, - // The FTP protocal can't handle trailing spaces (for instance + // The FTP protocol can't handle trailing spaces (for instance // pureftpd turns them into _) // // proftpd can't handle '*' in file names diff --git a/backend/googlephotos/api/types.go b/backend/googlephotos/api/types.go index 46b338448..d26cfc714 100644 --- a/backend/googlephotos/api/types.go +++ b/backend/googlephotos/api/types.go @@ -17,7 +17,7 @@ type Error struct { Details ErrorDetails `json:"error"` } -// Error statisfies error interface +// Error satisfies error interface func (e *Error) Error() string { return fmt.Sprintf("%s (%d %s)", e.Details.Message, e.Details.Code, e.Details.Status) } diff --git a/backend/googlephotos/pattern.go b/backend/googlephotos/pattern.go index 961bbe585..2cb5a0d3b 100644 --- a/backend/googlephotos/pattern.go +++ b/backend/googlephotos/pattern.go @@ -224,7 +224,7 @@ func (ds dirPatterns) mustCompile() dirPatterns { return ds } -// match finds the path passed in in the matching structure and +// match finds the path passed in the matching structure and // returns the parameters and a pointer to the match, or nil. 
func (ds dirPatterns) match(root string, itemPath string, isFile bool) (match []string, prefix string, pattern *dirPattern) { itemPath = strings.Trim(itemPath, "/") diff --git a/backend/hubic/auth.go b/backend/hubic/auth.go index 62213c35f..7e27b7f2e 100644 --- a/backend/hubic/auth.go +++ b/backend/hubic/auth.go @@ -21,7 +21,7 @@ func newAuth(f *Fs) *auth { } } -// Request constructs a http.Request for authentication +// Request constructs an http.Request for authentication // // returns nil for not needed func (a *auth) Request(*swift.Connection) (r *http.Request, err error) { diff --git a/backend/jottacloud/jottacloud.go b/backend/jottacloud/jottacloud.go index f8ecad41e..fe9ce39d9 100644 --- a/backend/jottacloud/jottacloud.go +++ b/backend/jottacloud/jottacloud.go @@ -235,7 +235,7 @@ func (f *Fs) Features() *fs.Features { return f.features } -// parsePath parses an box 'url' +// parsePath parses a box 'url' func parsePath(path string) (root string) { root = strings.Trim(path, "/") return @@ -454,7 +454,7 @@ func errorHandler(resp *http.Response) error { return errResponse } -// Jottacloud want's '+' to be URL encoded even though the RFC states it's not reserved +// Jottacloud wants '+' to be URL encoded even though the RFC states it's not reserved func urlPathEscape(in string) string { return strings.Replace(rest.URLPathEscape(in), "+", "%2B", -1) } @@ -464,7 +464,7 @@ func (f *Fs) filePathRaw(file string) string { return path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(path.Join(f.root, file))) } -// filePath returns a escaped file path (f.root, file) +// filePath returns an escaped file path (f.root, file) func (f *Fs) filePath(file string) string { return urlPathEscape(f.filePathRaw(file)) } @@ -493,7 +493,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { return nil, errors.New("Outdated config - please reconfigure this backend") } - // if custome endpoints are set use them else stick with defaults + // if custom endpoints are set use 
them else stick with defaults if tokenURL, ok := m.Get(configTokenURL); ok { oauthConfig.Endpoint.TokenURL = tokenURL // jottacloud is weird. we need to use the tokenURL as authURL @@ -1105,7 +1105,7 @@ func (o *Object) Remote() string { return o.remote } -// filePath returns a escaped file path (f.root, remote) +// filePath returns an escaped file path (f.root, remote) func (o *Object) filePath() string { return o.fs.filePath(o.remote) } diff --git a/backend/koofr/koofr.go b/backend/koofr/koofr.go index b97c0a9e4..3d218569f 100644 --- a/backend/koofr/koofr.go +++ b/backend/koofr/koofr.go @@ -421,7 +421,7 @@ func translateErrorsObject(err error) error { } // mkdir creates a directory at the given remote path. Creates ancestors if -// neccessary +// necessary func (f *Fs) mkdir(fullPath string) error { if fullPath == "/" { return nil diff --git a/backend/mailru/mailru.go b/backend/mailru/mailru.go index f163eb21a..5f317953e 100644 --- a/backend/mailru/mailru.go +++ b/backend/mailru/mailru.go @@ -402,7 +402,7 @@ func (q *quirks) parseQuirks(option string) { // "Accept-Encoding: gzip" header. However, enabling compression // might be good for performance. // Use this quirk to investigate the performance impact. - // Remove this quirk if perfomance does not improve. + // Remove this quirk if performance does not improve. q.gzip = true case "insecure": // The mailru disk-o protocol is not documented. 
To compare HTTP diff --git a/backend/mega/mega.go b/backend/mega/mega.go index 1802fa962..784a2a039 100644 --- a/backend/mega/mega.go +++ b/backend/mega/mega.go @@ -150,7 +150,7 @@ func (f *Fs) Features() *fs.Features { return f.features } -// parsePath parses an mega 'url' +// parsePath parses a mega 'url' func parsePath(path string) (root string) { root = strings.Trim(path, "/") return diff --git a/backend/onedrive/api/types.go b/backend/onedrive/api/types.go index eff661855..346829afa 100644 --- a/backend/onedrive/api/types.go +++ b/backend/onedrive/api/types.go @@ -272,19 +272,19 @@ type CreateShareLinkResponse struct { } `json:"link"` } -// AsyncOperationStatus provides information on the status of a asynchronous job progress. +// AsyncOperationStatus provides information on the status of an asynchronous job progress. // // The following API calls return AsyncOperationStatus resources: // // Copy Item // Upload From URL type AsyncOperationStatus struct { - PercentageComplete float64 `json:"percentageComplete"` // An float value between 0 and 100 that indicates the percentage complete. + PercentageComplete float64 `json:"percentageComplete"` // A float value between 0 and 100 that indicates the percentage complete. Status string `json:"status"` // A string value that maps to an enumeration of possible values about the status of the job. 
"notStarted | inProgress | completed | updating | failed | deletePending | deleteFailed | waiting" } // GetID returns a normalized ID of the item -// If DriveID is known it will be prefixed to the ID with # seperator +// If DriveID is known it will be prefixed to the ID with # separator // Can be parsed using onedrive.parseNormalizedID(normalizedID) func (i *Item) GetID() string { if i.IsRemote() && i.RemoteItem.ID != "" { diff --git a/backend/onedrive/onedrive.go b/backend/onedrive/onedrive.go index 9c9eb1da2..c18760bb2 100755 --- a/backend/onedrive/onedrive.go +++ b/backend/onedrive/onedrive.go @@ -396,7 +396,7 @@ func (f *Fs) Features() *fs.Features { return f.features } -// parsePath parses an one drive 'url' +// parsePath parses a one drive 'url' func parsePath(path string) (root string) { root = strings.Trim(path, "/") return @@ -1310,7 +1310,7 @@ func (f *Fs) Hashes() hash.Set { return hash.Set(QuickXorHashType) } -// PublicLink returns a link for downloading without accout. +// PublicLink returns a link for downloading without account. func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) { info, _, err := f.readMetaDataForPath(ctx, f.rootPath(remote)) if err != nil { diff --git a/backend/opendrive/opendrive.go b/backend/opendrive/opendrive.go index 3f8c05f83..3ab0a7606 100644 --- a/backend/opendrive/opendrive.go +++ b/backend/opendrive/opendrive.go @@ -677,7 +677,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options . 
} if "" == o.id { - // We need to create a ID for this file + // We need to create an ID for this file var resp *http.Response response := createFileResponse{} err := o.fs.pacer.Call(func() (bool, error) { diff --git a/backend/opendrive/types.go b/backend/opendrive/types.go index 92aaf29ad..4b68acbea 100644 --- a/backend/opendrive/types.go +++ b/backend/opendrive/types.go @@ -18,13 +18,13 @@ func (e *Error) Error() string { return fmt.Sprintf("%s (Error %d)", e.Info.Message, e.Info.Code) } -// Account describes a OpenDRIVE account +// Account describes an OpenDRIVE account type Account struct { Username string `json:"username"` Password string `json:"passwd"` } -// UserSessionInfo describes a OpenDRIVE session +// UserSessionInfo describes an OpenDRIVE session type UserSessionInfo struct { Username string `json:"username"` Password string `json:"passwd"` @@ -45,7 +45,7 @@ type UserSessionInfo struct { PartnerUsersDomain string `json:"PartnerUsersDomain"` } -// FolderList describes a OpenDRIVE listing +// FolderList describes an OpenDRIVE listing type FolderList struct { // DirUpdateTime string `json:"DirUpdateTime,string"` Name string `json:"Name"` @@ -56,7 +56,7 @@ type FolderList struct { Files []File `json:"Files"` } -// Folder describes a OpenDRIVE folder +// Folder describes an OpenDRIVE folder type Folder struct { FolderID string `json:"FolderID"` Name string `json:"Name"` @@ -109,7 +109,7 @@ type removeFolder struct { FolderID string `json:"folder_id"` } -// File describes a OpenDRIVE file +// File describes an OpenDRIVE file type File struct { FileID string `json:"FileId"` FileHash string `json:"FileHash"` diff --git a/backend/pcloud/pcloud.go b/backend/pcloud/pcloud.go index c2735014d..67b191f7d 100644 --- a/backend/pcloud/pcloud.go +++ b/backend/pcloud/pcloud.go @@ -152,7 +152,7 @@ func (f *Fs) Features() *fs.Features { return f.features } -// parsePath parses an pcloud 'url' +// parsePath parses a pcloud 'url' func parsePath(path string) (root string) { 
root = strings.Trim(path, "/") return diff --git a/backend/premiumizeme/api/types.go b/backend/premiumizeme/api/types.go index f5a8cce6c..1dccd4ff9 100644 --- a/backend/premiumizeme/api/types.go +++ b/backend/premiumizeme/api/types.go @@ -10,7 +10,7 @@ type Response struct { Status string `json:"status"` } -// Error statisfies the error interface +// Error satisfies the error interface func (e *Response) Error() string { return fmt.Sprintf("%s: %s", e.Status, e.Message) } diff --git a/backend/qingstor/qingstor.go b/backend/qingstor/qingstor.go index 6e58346a9..9e56d4c23 100644 --- a/backend/qingstor/qingstor.go +++ b/backend/qingstor/qingstor.go @@ -203,7 +203,7 @@ func (o *Object) split() (bucket, bucketPath string) { return o.fs.split(o.remote) } -// Split an URL into three parts: protocol host and port +// Split a URL into three parts: protocol host and port func qsParseEndpoint(endpoint string) (protocol, host, port string, err error) { /* Pattern to match an endpoint, diff --git a/backend/qingstor/upload.go b/backend/qingstor/upload.go index 32fc33d26..7fb2e2b15 100644 --- a/backend/qingstor/upload.go +++ b/backend/qingstor/upload.go @@ -22,7 +22,7 @@ const ( // maxSinglePartSize = 1024 * 1024 * 1024 * 5 // The maximum allowed size when uploading a single object to QingStor // maxMultiPartSize = 1024 * 1024 * 1024 * 1 // The maximum allowed part size when uploading a part to QingStor minMultiPartSize = 1024 * 1024 * 4 // The minimum allowed part size when uploading a part to QingStor - maxMultiParts = 10000 // The maximum allowed number of parts in an multi-part upload + maxMultiParts = 10000 // The maximum allowed number of parts in a multi-part upload ) const ( @@ -168,7 +168,7 @@ func (u *uploader) singlePartUpload(buf io.Reader, size int64) error { return err } -// Upload upload a object into QingStor +// Upload upload an object into QingStor func (u *uploader) upload() error { u.init() @@ -297,7 +297,7 @@ func (mu *multiUploader) send(c chunk) error { 
return err } -// complete complete an multipart upload +// complete complete a multipart upload func (mu *multiUploader) complete() error { var err error if err = mu.getErr(); err != nil { @@ -324,7 +324,7 @@ func (mu *multiUploader) complete() error { return err } -// abort abort an multipart upload +// abort abort a multipart upload func (mu *multiUploader) abort() error { var err error bucketInit, _ := mu.bucketInit() @@ -342,7 +342,7 @@ func (mu *multiUploader) abort() error { // multiPartUpload upload a multiple object into QingStor func (mu *multiUploader) multiPartUpload(firstBuf io.ReadSeeker) (err error) { - // Initiate an multi-part upload + // Initiate a multi-part upload if err = mu.initiate(); err != nil { return err } diff --git a/backend/s3/s3.go b/backend/s3/s3.go index f7e1b6052..0ce6f14ed 100644 --- a/backend/s3/s3.go +++ b/backend/s3/s3.go @@ -677,7 +677,7 @@ isn't set then "acl" is used instead.`, }}, }, { Name: "sse_customer_key", - Help: "If using SSE-C you must provide the secret encyption key used to encrypt/decrypt your data.", + Help: "If using SSE-C you must provide the secret encryption key used to encrypt/decrypt your data.", Provider: "AWS,Ceph,Minio", Advanced: true, Examples: []fs.OptionExample{{ diff --git a/backend/seafile/seafile.go b/backend/seafile/seafile.go index 024f04957..a4f5f2a87 100644 --- a/backend/seafile/seafile.go +++ b/backend/seafile/seafile.go @@ -212,7 +212,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { } fs.Debugf(nil, "Seafile server version %s", serverInfo.Version) - // We don't support bellow seafile v6.0 (version 6.0 is already more than 3 years old) + // We don't support lower than seafile v6.0 (version 6.0 is already more than 3 years old) serverVersion := semver.New(serverInfo.Version) if serverVersion.Major < 6 { return nil, errors.New("unsupported Seafile server (version < 6.0)") diff --git a/backend/seafile/webapi.go b/backend/seafile/webapi.go index d751561f2..bf9b43af2 100644 
--- a/backend/seafile/webapi.go +++ b/backend/seafile/webapi.go @@ -1058,7 +1058,7 @@ func (f *Fs) renameFileAPIv2(ctx context.Context, libraryID, filePath, newname s // No luck with JSON input with the older api2 postParameters := url.Values{ "operation": {"rename"}, - "reloaddir": {"true"}, // This is an undocumented trick to avoid a http code 301 response (found in https://github.com/haiwen/seahub/blob/master/seahub/api2/views.py) + "reloaddir": {"true"}, // This is an undocumented trick to avoid an http code 301 response (found in https://github.com/haiwen/seahub/blob/master/seahub/api2/views.py) "newname": {f.opt.Enc.FromStandardName(newname)}, } diff --git a/backend/sftp/sftp.go b/backend/sftp/sftp.go index 660e2a02f..f67852af1 100644 --- a/backend/sftp/sftp.go +++ b/backend/sftp/sftp.go @@ -485,7 +485,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { return NewFsWithConnection(ctx, name, root, m, opt, sshConfig) } -// NewFsWithConnection creates a new Fs object from the name and root and a ssh.ClientConfig. It connects to +// NewFsWithConnection creates a new Fs object from the name and root and an ssh.ClientConfig. It connects to // the host specified in the ssh.ClientConfig func NewFsWithConnection(ctx context.Context, name string, root string, m configmap.Mapper, opt *Options, sshConfig *ssh.ClientConfig) (fs.Fs, error) { f := &Fs{ @@ -1036,7 +1036,7 @@ func parseHash(bytes []byte) string { // Parses the byte array output from the SSH session // returned by an invocation of df into -// the disk size, used space, and avaliable space on the disk, in that order. +// the disk size, used space, and available space on the disk, in that order. 
// Only works when `df` has output info on only one disk func parseUsage(bytes []byte) (spaceTotal int64, spaceUsed int64, spaceAvail int64) { spaceTotal, spaceUsed, spaceAvail = -1, -1, -1 diff --git a/backend/sharefile/api/types.go b/backend/sharefile/api/types.go index 8d96228c3..282087bf3 100644 --- a/backend/sharefile/api/types.go +++ b/backend/sharefile/api/types.go @@ -102,7 +102,7 @@ type UploadSpecification struct { MaxNumberOfThreads int `json:"MaxNumberOfThreads"` // Specifies the max number of chunks that can be sent simultaneously for threaded uploads } -// UploadFinishResponse is returnes from calling UploadSpecification.FinishURI +// UploadFinishResponse is returned from calling UploadSpecification.FinishURI type UploadFinishResponse struct { Error bool `json:"error"` ErrorMessage string `json:"errorMessage"` diff --git a/backend/swift/swift.go b/backend/swift/swift.go index e7f0f2417..313327a8b 100644 --- a/backend/swift/swift.go +++ b/backend/swift/swift.go @@ -284,7 +284,7 @@ var retryErrorCodes = []int{ // shouldRetry returns a boolean as to whether this err deserves to be // retried.
It returns the err as a convenience func shouldRetry(err error) (bool, error) { - // If this is an swift.Error object extract the HTTP error code + // If this is a swift.Error object extract the HTTP error code if swiftError, ok := err.(*swift.Error); ok { for _, e := range retryErrorCodes { if swiftError.StatusCode == e { @@ -1253,7 +1253,7 @@ func deleteChunks(o *Object, segmentsContainer string, segmentInfos []string) { fs.Debugf(o, "Delete segment file %q on %q", v, segmentsContainer) e := o.fs.c.ObjectDelete(segmentsContainer, v) if e != nil { - fs.Errorf(o, "Error occured in delete segment file %q on %q , error: %q", v, segmentsContainer, e) + fs.Errorf(o, "Error occurred in delete segment file %q on %q , error: %q", v, segmentsContainer, e) } } } diff --git a/backend/tardigrade/fs.go b/backend/tardigrade/fs.go index d2aa43134..05819a8a8 100644 --- a/backend/tardigrade/fs.go +++ b/backend/tardigrade/fs.go @@ -669,7 +669,7 @@ func (f *Fs) Rmdir(ctx context.Context, relative string) (err error) { // requirements. In particular, libuplink requires a trailing slash for // listings, but rclone does not always provide one. Further, depending on how // the path was initially path normalization may have removed it (e.g. a -// trailing slash from the CLI is removed before it ever get's to the backend +// trailing slash from the CLI is removed before it ever gets to the backend // code). func newPrefix(prefix string) string { if prefix == "" { diff --git a/backend/union/errors.go b/backend/union/errors.go index 1249dd540..46259b595 100644 --- a/backend/union/errors.go +++ b/backend/union/errors.go @@ -33,7 +33,7 @@ func (e Errors) FilterNil() Errors { return ne } -// Err returns a error interface that filtered nil, +// Err returns an error interface that filtered nil, // or nil if no non-nil Error is presented. 
func (e Errors) Err() error { ne := e.FilterNil() diff --git a/backend/union/policy/all.go b/backend/union/policy/all.go index 06589220d..1de36da2a 100644 --- a/backend/union/policy/all.go +++ b/backend/union/policy/all.go @@ -31,7 +31,7 @@ func (p *All) Create(ctx context.Context, upstreams []*upstream.Fs, path string) return upstreams, nil } -// CreateEntries is CREATE category policy but receving a set of candidate entries +// CreateEntries is CREATE category policy but receiving a set of candidate entries func (p *All) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) { if len(entries) == 0 { return nil, fs.ErrorObjectNotFound diff --git a/backend/union/policy/epall.go b/backend/union/policy/epall.go index 666820a29..caac0b17d 100644 --- a/backend/union/policy/epall.go +++ b/backend/union/policy/epall.go @@ -61,7 +61,7 @@ func (p *EpAll) Action(ctx context.Context, upstreams []*upstream.Fs, path strin return p.epall(ctx, upstreams, path) } -// ActionEntries is ACTION category policy but receving a set of candidate entries +// ActionEntries is ACTION category policy but receiving a set of candidate entries func (p *EpAll) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) { if len(entries) == 0 { return nil, fs.ErrorObjectNotFound @@ -86,7 +86,7 @@ func (p *EpAll) Create(ctx context.Context, upstreams []*upstream.Fs, path strin return upstreams, err } -// CreateEntries is CREATE category policy but receving a set of candidate entries +// CreateEntries is CREATE category policy but receiving a set of candidate entries func (p *EpAll) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) { if len(entries) == 0 { return nil, fs.ErrorObjectNotFound diff --git a/backend/union/policy/epff.go b/backend/union/policy/epff.go index e294cdadb..ea4527087 100644 --- a/backend/union/policy/epff.go +++ b/backend/union/policy/epff.go @@ -61,7 +61,7 @@ func (p *EpFF) Action(ctx context.Context, upstreams []*upstream.Fs, path string
return []*upstream.Fs{u}, err } -// ActionEntries is ACTION category policy but receving a set of candidate entries +// ActionEntries is ACTION category policy but receiving a set of candidate entries func (p *EpFF) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) { if len(entries) == 0 { return nil, fs.ErrorObjectNotFound @@ -86,7 +86,7 @@ func (p *EpFF) Create(ctx context.Context, upstreams []*upstream.Fs, path string return []*upstream.Fs{u}, err } -// CreateEntries is CREATE category policy but receving a set of candidate entries +// CreateEntries is CREATE category policy but receiving a set of candidate entries func (p *EpFF) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) { if len(entries) == 0 { return nil, fs.ErrorObjectNotFound @@ -106,7 +106,7 @@ func (p *EpFF) Search(ctx context.Context, upstreams []*upstream.Fs, path string return p.epff(ctx, upstreams, path) } -// SearchEntries is SEARCH category policy but receving a set of candidate entries +// SearchEntries is SEARCH category policy but receiving a set of candidate entries func (p *EpFF) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) { if len(entries) == 0 { return nil, fs.ErrorObjectNotFound diff --git a/backend/union/policy/eplfs.go b/backend/union/policy/eplfs.go index 9b6107eb2..a2788138c 100644 --- a/backend/union/policy/eplfs.go +++ b/backend/union/policy/eplfs.go @@ -65,7 +65,7 @@ func (p *EpLfs) Action(ctx context.Context, upstreams []*upstream.Fs, path strin return []*upstream.Fs{u}, err } -// ActionEntries is ACTION category policy but receving a set of candidate entries +// ActionEntries is ACTION category policy but receiving a set of candidate entries func (p *EpLfs) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) { entries, err := p.EpAll.ActionEntries(entries...) 
if err != nil { @@ -85,7 +85,7 @@ func (p *EpLfs) Create(ctx context.Context, upstreams []*upstream.Fs, path strin return []*upstream.Fs{u}, err } -// CreateEntries is CREATE category policy but receving a set of candidate entries +// CreateEntries is CREATE category policy but receiving a set of candidate entries func (p *EpLfs) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) { entries, err := p.EpAll.CreateEntries(entries...) if err != nil { @@ -107,7 +107,7 @@ func (p *EpLfs) Search(ctx context.Context, upstreams []*upstream.Fs, path strin return p.lfs(upstreams) } -// SearchEntries is SEARCH category policy but receving a set of candidate entries +// SearchEntries is SEARCH category policy but receiving a set of candidate entries func (p *EpLfs) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) { if len(entries) == 0 { return nil, fs.ErrorObjectNotFound diff --git a/backend/union/policy/eplno.go b/backend/union/policy/eplno.go index b81f85c31..bb74ba500 100644 --- a/backend/union/policy/eplno.go +++ b/backend/union/policy/eplno.go @@ -65,7 +65,7 @@ func (p *EpLno) Action(ctx context.Context, upstreams []*upstream.Fs, path strin return []*upstream.Fs{u}, err } -// ActionEntries is ACTION category policy but receving a set of candidate entries +// ActionEntries is ACTION category policy but receiving a set of candidate entries func (p *EpLno) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) { entries, err := p.EpAll.ActionEntries(entries...) if err != nil { @@ -85,7 +85,7 @@ func (p *EpLno) Create(ctx context.Context, upstreams []*upstream.Fs, path strin return []*upstream.Fs{u}, err } -// CreateEntries is CREATE category policy but receving a set of candidate entries +// CreateEntries is CREATE category policy but receiving a set of candidate entries func (p *EpLno) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) { entries, err := p.EpAll.CreateEntries(entries...) 
if err != nil { @@ -107,7 +107,7 @@ func (p *EpLno) Search(ctx context.Context, upstreams []*upstream.Fs, path strin return p.lno(upstreams) } -// SearchEntries is SEARCH category policy but receving a set of candidate entries +// SearchEntries is SEARCH category policy but receiving a set of candidate entries func (p *EpLno) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) { if len(entries) == 0 { return nil, fs.ErrorObjectNotFound diff --git a/backend/union/policy/eplus.go b/backend/union/policy/eplus.go index ef8a963b0..000d23f7e 100644 --- a/backend/union/policy/eplus.go +++ b/backend/union/policy/eplus.go @@ -65,7 +65,7 @@ func (p *EpLus) Action(ctx context.Context, upstreams []*upstream.Fs, path strin return []*upstream.Fs{u}, err } -// ActionEntries is ACTION category policy but receving a set of candidate entries +// ActionEntries is ACTION category policy but receiving a set of candidate entries func (p *EpLus) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) { entries, err := p.EpAll.ActionEntries(entries...) if err != nil { @@ -85,7 +85,7 @@ func (p *EpLus) Create(ctx context.Context, upstreams []*upstream.Fs, path strin return []*upstream.Fs{u}, err } -// CreateEntries is CREATE category policy but receving a set of candidate entries +// CreateEntries is CREATE category policy but receiving a set of candidate entries func (p *EpLus) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) { entries, err := p.EpAll.CreateEntries(entries...) 
if err != nil { @@ -107,7 +107,7 @@ func (p *EpLus) Search(ctx context.Context, upstreams []*upstream.Fs, path strin return p.lus(upstreams) } -// SearchEntries is SEARCH category policy but receving a set of candidate entries +// SearchEntries is SEARCH category policy but receiving a set of candidate entries func (p *EpLus) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) { if len(entries) == 0 { return nil, fs.ErrorObjectNotFound diff --git a/backend/union/policy/epmfs.go b/backend/union/policy/epmfs.go index 1370411c0..1a2625009 100644 --- a/backend/union/policy/epmfs.go +++ b/backend/union/policy/epmfs.go @@ -64,7 +64,7 @@ func (p *EpMfs) Action(ctx context.Context, upstreams []*upstream.Fs, path strin return []*upstream.Fs{u}, err } -// ActionEntries is ACTION category policy but receving a set of candidate entries +// ActionEntries is ACTION category policy but receiving a set of candidate entries func (p *EpMfs) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) { entries, err := p.EpAll.ActionEntries(entries...) if err != nil { @@ -84,7 +84,7 @@ func (p *EpMfs) Create(ctx context.Context, upstreams []*upstream.Fs, path strin return []*upstream.Fs{u}, err } -// CreateEntries is CREATE category policy but receving a set of candidate entries +// CreateEntries is CREATE category policy but receiving a set of candidate entries func (p *EpMfs) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) { entries, err := p.EpAll.CreateEntries(entries...) 
if err != nil { @@ -106,7 +106,7 @@ func (p *EpMfs) Search(ctx context.Context, upstreams []*upstream.Fs, path strin return p.mfs(upstreams) } -// SearchEntries is SEARCH category policy but receving a set of candidate entries +// SearchEntries is SEARCH category policy but receiving a set of candidate entries func (p *EpMfs) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) { if len(entries) == 0 { return nil, fs.ErrorObjectNotFound diff --git a/backend/union/policy/eprand.go b/backend/union/policy/eprand.go index bc137ac78..ef223e7be 100644 --- a/backend/union/policy/eprand.go +++ b/backend/union/policy/eprand.go @@ -38,7 +38,7 @@ func (p *EpRand) Action(ctx context.Context, upstreams []*upstream.Fs, path stri return []*upstream.Fs{p.rand(upstreams)}, nil } -// ActionEntries is ACTION category policy but receving a set of candidate entries +// ActionEntries is ACTION category policy but receiving a set of candidate entries func (p *EpRand) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) { entries, err := p.EpAll.ActionEntries(entries...) if err != nil { @@ -56,7 +56,7 @@ func (p *EpRand) Create(ctx context.Context, upstreams []*upstream.Fs, path stri return []*upstream.Fs{p.rand(upstreams)}, nil } -// CreateEntries is CREATE category policy but receving a set of candidate entries +// CreateEntries is CREATE category policy but receiving a set of candidate entries func (p *EpRand) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) { entries, err := p.EpAll.CreateEntries(entries...)
if err != nil { @@ -77,7 +77,7 @@ func (p *EpRand) Search(ctx context.Context, upstreams []*upstream.Fs, path stri return p.rand(upstreams), nil } -// SearchEntries is SEARCH category policy but receving a set of candidate entries +// SearchEntries is SEARCH category policy but receiving a set of candidate entries func (p *EpRand) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) { if len(entries) == 0 { return nil, fs.ErrorObjectNotFound diff --git a/backend/union/policy/newest.go b/backend/union/policy/newest.go index 8d4fa8049..8ac8ff639 100644 --- a/backend/union/policy/newest.go +++ b/backend/union/policy/newest.go @@ -15,7 +15,7 @@ func init() { } // Newest policy picks the file / directory with the largest mtime -// It implies the existance of a path +// It implies the existence of a path type Newest struct { EpAll } @@ -93,7 +93,7 @@ func (p *Newest) Action(ctx context.Context, upstreams []*upstream.Fs, path stri return []*upstream.Fs{u}, err } -// ActionEntries is ACTION category policy but receving a set of candidate entries +// ActionEntries is ACTION category policy but receiving a set of candidate entries func (p *Newest) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) { if len(entries) == 0 { return nil, fs.ErrorObjectNotFound @@ -119,7 +119,7 @@ func (p *Newest) Create(ctx context.Context, upstreams []*upstream.Fs, path stri return []*upstream.Fs{u}, err } -// CreateEntries is CREATE category policy but receving a set of candidate entries +// CreateEntries is CREATE category policy but receiving a set of candidate entries func (p *Newest) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) { if len(entries) == 0 { return nil, fs.ErrorObjectNotFound @@ -140,7 +140,7 @@ func (p *Newest) Search(ctx context.Context, upstreams []*upstream.Fs, path stri return p.newest(ctx, upstreams, path) } -// SearchEntries is SEARCH category policy but receving a set of candidate entries +// SearchEntries is SEARCH 
category policy but receiving a set of candidate entries func (p *Newest) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) { if len(entries) == 0 { return nil, fs.ErrorObjectNotFound diff --git a/backend/union/policy/policy.go b/backend/union/policy/policy.go index 0cb2d0007..19cee2e01 100644 --- a/backend/union/policy/policy.go +++ b/backend/union/policy/policy.go @@ -26,13 +26,13 @@ type Policy interface { // Search category policy, governing the access to files and directories Search(ctx context.Context, upstreams []*upstream.Fs, path string) (*upstream.Fs, error) - // ActionEntries is ACTION category policy but receving a set of candidate entries + // ActionEntries is ACTION category policy but receiving a set of candidate entries ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) - // CreateEntries is CREATE category policy but receving a set of candidate entries + // CreateEntries is CREATE category policy but receiving a set of candidate entries CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) - // SearchEntries is SEARCH category policy but receving a set of candidate entries + // SearchEntries is SEARCH category policy but receiving a set of candidate entries SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) } diff --git a/backend/union/policy/rand.go b/backend/union/policy/rand.go index 6e3128ed7..5c900c0d6 100644 --- a/backend/union/policy/rand.go +++ b/backend/union/policy/rand.go @@ -35,7 +35,7 @@ func (p *Rand) Action(ctx context.Context, upstreams []*upstream.Fs, path string return []*upstream.Fs{p.rand(upstreams)}, nil } -// ActionEntries is ACTION category policy but receving a set of candidate entries +// ActionEntries is ACTION category policy but receiving a set of candidate entries func (p *Rand) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) { entries, err := p.All.ActionEntries(entries...) 
if err != nil { @@ -53,7 +53,7 @@ func (p *Rand) Create(ctx context.Context, upstreams []*upstream.Fs, path string return []*upstream.Fs{p.rand(upstreams)}, nil } -// CreateEntries is CREATE category policy but receving a set of candidate entries +// CreateEntries is CREATE category policy but receiving a set of candidate entries func (p *Rand) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) { entries, err := p.All.CreateEntries(entries...) if err != nil { @@ -74,7 +74,7 @@ func (p *Rand) Search(ctx context.Context, upstreams []*upstream.Fs, path string return p.rand(upstreams), nil } -// SearchEntries is SEARCH category policy but receving a set of candidate entries +// SearchEntries is SEARCH category policy but receiving a set of candidate entries func (p *Rand) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) { if len(entries) == 0 { return nil, fs.ErrorObjectNotFound diff --git a/backend/union/upstream/upstream.go b/backend/union/upstream/upstream.go index 4538f6685..30cb1f983 100644 --- a/backend/union/upstream/upstream.go +++ b/backend/union/upstream/upstream.go @@ -100,7 +100,7 @@ func New(remote, root string, cacheTime time.Duration) (*Fs, error) { return f, err } -// WrapDirectory wraps a fs.Directory to include the info +// WrapDirectory wraps an fs.Directory to include the info // of the upstream Fs func (f *Fs) WrapDirectory(e fs.Directory) *Directory { if e == nil { @@ -112,7 +112,7 @@ func (f *Fs) WrapDirectory(e fs.Directory) *Directory { } } -// WrapObject wraps a fs.Object to include the info +// WrapObject wraps an fs.Object to include the info // of the upstream Fs func (f *Fs) WrapObject(o fs.Object) *Object { if o == nil { @@ -124,7 +124,7 @@ func (f *Fs) WrapObject(o fs.Object) *Object { } } -// WrapEntry wraps a fs.DirEntry to include the info +// WrapEntry wraps an fs.DirEntry to include the info // of the upstream Fs func (f *Fs) WrapEntry(e fs.DirEntry) (Entry, error) { switch e.(type) { diff --git 
a/backend/webdav/odrvcookie/fetch.go b/backend/webdav/odrvcookie/fetch.go index 657971fad..88f715ac1 100644 --- a/backend/webdav/odrvcookie/fetch.go +++ b/backend/webdav/odrvcookie/fetch.go @@ -48,7 +48,7 @@ type SuccessResponseBody struct { Token string `xml:"RequestSecurityTokenResponse>RequestedSecurityToken>BinarySecurityToken"` } -// SharepointError holds a error response microsoft login +// SharepointError holds an error response microsoft login type SharepointError struct { XMLName xml.Name `xml:"Envelope"` Body ErrorResponseBody `xml:"Body"` @@ -58,7 +58,7 @@ func (e *SharepointError) Error() string { return fmt.Sprintf("%s: %s (%s)", e.Body.FaultCode, e.Body.Reason, e.Body.Detail) } -// ErrorResponseBody contains the body of a erroneous repsonse +// ErrorResponseBody contains the body of an erroneous response type ErrorResponseBody struct { XMLName xml.Name FaultCode string `xml:"Fault>Code>Subcode>Value"` diff --git a/backend/yandex/yandex.go b/backend/yandex/yandex.go index af134daa6..3b9541538 100644 --- a/backend/yandex/yandex.go +++ b/backend/yandex/yandex.go @@ -200,12 +200,12 @@ func (f *Fs) setRoot(root string) { f.diskRoot = diskRoot } -// filePath returns a escaped file path (f.root, file) +// filePath returns an escaped file path (f.root, file) func (f *Fs) filePath(file string) string { return path.Join(f.diskRoot, file) } -// dirPath returns a escaped file path (f.root, file) ending with '/' +// dirPath returns an escaped file path (f.root, file) ending with '/' func (f *Fs) dirPath(file string) string { return path.Join(f.diskRoot, file) + "/" } @@ -502,7 +502,7 @@ func (f *Fs) mkDirs(ctx context.Context, path string) (err error) { if err = f.CreateDir(ctx, dirString); err != nil { if apiErr, ok := err.(*api.ErrorResponse); ok { - // allready exists + // already exists if apiErr.ErrorName != "DiskPathPointsToExistentDirectoryError" { // 2 if it fails then create all directories in the path from root. 
dirs := strings.Split(dirString, "/") //path separator diff --git a/cmd/cmount/mount.go b/cmd/cmount/mount.go index 1e39f773b..881542b2e 100644 --- a/cmd/cmount/mount.go +++ b/cmd/cmount/mount.go @@ -1,4 +1,4 @@ -// Package cmount implents a FUSE mounting system for rclone remotes. +// Package cmount implements a FUSE mounting system for rclone remotes. // // This uses the cgo based cgofuse library diff --git a/cmd/copyurl/copyurl.go b/cmd/copyurl/copyurl.go index 738b4a861..50e590117 100644 --- a/cmd/copyurl/copyurl.go +++ b/cmd/copyurl/copyurl.go @@ -33,7 +33,7 @@ var commandDefinition = &cobra.Command{ Download a URL's content and copy it to the destination without saving it in temporary storage. -Setting --auto-filename will cause the file name to be retreived from +Setting --auto-filename will cause the file name to be retrieved from the from URL (after any redirections) and used in the destination path. diff --git a/cmd/mount/mount.go b/cmd/mount/mount.go index 50d66060f..34da286c6 100644 --- a/cmd/mount/mount.go +++ b/cmd/mount/mount.go @@ -1,4 +1,4 @@ -// Package mount implents a FUSE mounting system for rclone remotes. +// Package mount implements a FUSE mounting system for rclone remotes. // +build linux,go1.13 darwin,go1.13 freebsd,go1.13 diff --git a/cmd/mount2/mount.go b/cmd/mount2/mount.go index b20b5a214..b2b13d306 100644 --- a/cmd/mount2/mount.go +++ b/cmd/mount2/mount.go @@ -1,4 +1,4 @@ -// Package mount implents a FUSE mounting system for rclone remotes. +// Package mount implements a FUSE mounting system for rclone remotes. 
// +build linux darwin,amd64 diff --git a/cmd/mountlib/rc_test.go b/cmd/mountlib/rc_test.go index 5f1e2a700..6654ed4e5 100644 --- a/cmd/mountlib/rc_test.go +++ b/cmd/mountlib/rc_test.go @@ -85,7 +85,7 @@ func TestRc(t *testing.T) { require.NoError(t, err) assert.Equal(t, int64(5), fi.Size()) - // FIXME the OS somtimes appears to be using the mount + // FIXME the OS sometimes appears to be using the mount // immediately after it appears so wait a moment time.Sleep(100 * time.Millisecond) diff --git a/cmd/serve/dlna/cds.go b/cmd/serve/dlna/cds.go index 1b600f203..14bbb49e0 100644 --- a/cmd/serve/dlna/cds.go +++ b/cmd/serve/dlna/cds.go @@ -181,7 +181,7 @@ func (cds *contentDirectoryService) readContainer(o object, host string) (ret [] // Given a list of nodes, separate them into potential media items and any associated resources (external subtitles, // for example.) // -// The result is a a slice of potential media nodes (in their original order) and a map containing associated +// The result is a slice of potential media nodes (in their original order) and a map containing associated // resources nodes of each media node, if any. func mediaWithResources(nodes vfs.Nodes) (vfs.Nodes, map[vfs.Node]vfs.Nodes) { media, mediaResources := vfs.Nodes{}, make(map[vfs.Node]vfs.Nodes) diff --git a/cmd/serve/dlna/dlna.go b/cmd/serve/dlna/dlna.go index 3030837ea..a6b71bf24 100644 --- a/cmd/serve/dlna/dlna.go +++ b/cmd/serve/dlna/dlna.go @@ -34,7 +34,7 @@ func init() { var Command = &cobra.Command{ Use: "dlna remote:path", Short: `Serve remote:path over DLNA`, - Long: `rclone serve dlna is a DLNA media server for media stored in a rclone remote. Many + Long: `rclone serve dlna is a DLNA media server for media stored in an rclone remote. Many devices, such as the Xbox and PlayStation, can automatically discover this server in the LAN and play audio/video from it. VLC is also supported. Service discovery uses UDP multicast packets (SSDP) and will thus only work on LANs. 
diff --git a/cmd/serve/httplib/serve/dir.go b/cmd/serve/httplib/serve/dir.go index c0801145f..835b9f67c 100644 --- a/cmd/serve/httplib/serve/dir.go +++ b/cmd/serve/httplib/serve/dir.go @@ -123,7 +123,7 @@ func (d *Directory) AddEntry(remote string, isDir bool) { }) } -// Error logs the error and if a ResponseWriter is given it writes a http.StatusInternalServerError +// Error logs the error and if a ResponseWriter is given it writes an http.StatusInternalServerError func Error(what interface{}, w http.ResponseWriter, text string, err error) { err = fs.CountError(err) fs.Errorf(what, "%s: %v", text, err) @@ -132,7 +132,7 @@ func Error(what interface{}, w http.ResponseWriter, text string, err error) { } } -// ProcessQueryParams takes and sorts/orders based on the request sort/order parameters and defailt is namedirfist/asc +// ProcessQueryParams takes and sorts/orders based on the request sort/order parameters and default is namedirfirst/asc func (d *Directory) ProcessQueryParams(sortParm string, orderParm string) *Directory { d.Sort = sortParm d.Order = orderParm diff --git a/cmd/serve/proxy/proxy.go b/cmd/serve/proxy/proxy.go index 56e4e1e1c..bda2c7343 100644 --- a/cmd/serve/proxy/proxy.go +++ b/cmd/serve/proxy/proxy.go @@ -89,7 +89,7 @@ that since |_obscure| is set to |pass|, rclone will obscure the |pass| parameter before creating the backend (which is required for sftp backends). -The progam can manipulate the supplied |user| in any way, for example +The program can manipulate the supplied |user| in any way, for example to make proxy to many different sftp backends, you could make the |user| be |user@example.com| and then set the |host| to |example.com| in the output and the user to |user|.
For security you'd probably want diff --git a/cmd/serve/sftp/connection.go b/cmd/serve/sftp/connection.go index 23e375116..21c5c6dfa 100644 --- a/cmd/serve/sftp/connection.go +++ b/cmd/serve/sftp/connection.go @@ -51,7 +51,7 @@ type conn struct { what string } -// execCommand implements an extrememly limited number of commands to +// execCommand implements an extremely limited number of commands to // interoperate with the rclone sftp backend func (c *conn) execCommand(ctx context.Context, out io.Writer, command string) (err error) { binary, args := command, "" diff --git a/cmd/tree/tree.go b/cmd/tree/tree.go index 6babdc3c6..d199e2c01 100644 --- a/cmd/tree/tree.go +++ b/cmd/tree/tree.go @@ -143,7 +143,7 @@ func Tree(fsrc fs.Fs, outFile io.Writer, opts *tree.Options) error { return nil } -// FileInfo maps a fs.DirEntry into an os.FileInfo +// FileInfo maps an fs.DirEntry into an os.FileInfo type FileInfo struct { entry fs.DirEntry } diff --git a/docs/content/cache.md b/docs/content/cache.md index 448b787bc..f4e67e324 100644 --- a/docs/content/cache.md +++ b/docs/content/cache.md @@ -222,7 +222,7 @@ There are a couple of issues with Windows `mount` functionality that still requi It should be considered as experimental thus far as fixes come in for this OS. Most of the issues seem to be related to the difference between filesystems -on Linux flavors and Windows as cache is heavily dependant on them. +on Linux flavors and Windows as cache is heavily dependent on them. Any reports or feedback on how cache behaves on this OS is greatly appreciated. 
diff --git a/docs/content/changelog.md b/docs/content/changelog.md index 85c03504b..72ed92ecd 100644 --- a/docs/content/changelog.md +++ b/docs/content/changelog.md @@ -376,7 +376,7 @@ date: "2020-02-01" * march: Fix checking sub-directories when using `--no-traverse` (buengese) * rc * Fix unmarshalable http.AuthFn in options and put in test for marshalability (Nick Craig-Wood) - * Move job expire flags to rc to fix initalization problem (Nick Craig-Wood) + * Move job expire flags to rc to fix initialization problem (Nick Craig-Wood) * Fix `--loopback` with rc/list and others (Nick Craig-Wood) * rcat: Fix slowdown on systems with multiple hashes (Nick Craig-Wood) * rcd: Fix permissions problems on cache directory with web gui download (Nick Craig-Wood) @@ -515,7 +515,7 @@ date: "2020-02-01" * Onedrive * More accurately check if root is found (Cnly) * S3 - * Suppport S3 Accelerated endpoints with `--s3-use-accelerate-endpoint` (Nick Craig-Wood) + * Support S3 Accelerated endpoints with `--s3-use-accelerate-endpoint` (Nick Craig-Wood) * Add config info for Wasabi's EU Central endpoint (Robert Marko) * Make SetModTime work for GLACIER while syncing (Philip Harvey) * SFTP @@ -1295,18 +1295,18 @@ Point release to fix hubic and azureblob backends. 
* Rclone no longer has any working keys - disable integration tests * Implement DirChangeNotify to notify cache/vfs/mount of changes * Azureblob - * Don't check for bucket/container presense if listing was OK + * Don't check for bucket/container presence if listing was OK * this makes rclone do one less request per invocation * Improve accounting for chunked uploads * Backblaze B2 - * Don't check for bucket/container presense if listing was OK + * Don't check for bucket/container presence if listing was OK * this makes rclone do one less request per invocation * Box * Improve accounting for chunked uploads * Dropbox * Fix custom oauth client parameters * Google Cloud Storage - * Don't check for bucket/container presense if listing was OK + * Don't check for bucket/container presence if listing was OK * this makes rclone do one less request per invocation * Google Drive * Migrate to api v3 (Fabian Möller) @@ -1329,13 +1329,13 @@ Point release to fix hubic and azureblob backends. * Pcloud * Remove unused chunked upload flag and code * Qingstor - * Don't check for bucket/container presense if listing was OK + * Don't check for bucket/container presence if listing was OK * this makes rclone do one less request per invocation * S3 * Support hashes for multipart files (Chris Redekop) * Initial support for IBM COS (S3) (Giri Badanahatti) * Update docs to discourage use of v2 auth with CEPH and others - * Don't check for bucket/container presense if listing was OK + * Don't check for bucket/container presence if listing was OK * this makes rclone do one less request per invocation * Fix server side copy and set modtime on files with + in * SFTP @@ -1350,7 +1350,7 @@ Point release to fix hubic and azureblob backends. 
* Fix refresh of authentication token * in v1.39 a bug was introduced which ignored new tokens - this fixes it * Fix extra HEAD transaction when uploading a new file - * Don't check for bucket/container presense if listing was OK + * Don't check for bucket/container presence if listing was OK * this makes rclone do one less request per invocation * Webdav * Add new time formats to support mydrive.ch and others @@ -1375,7 +1375,7 @@ Point release to fix hubic and azureblob backends. * curl install for rclone (Filip Bartodziej) * --stats now shows percentage, size, rate and ETA in condensed form (Ishuah Kariuki) * --exclude-if-present to exclude a directory if a file is present (Iakov Davydov) - * rmdirs: add --leave-root flag (lewpam) + * rmdirs: add --leave-root flag (lewapm) * move: add --delete-empty-src-dirs flag to remove dirs after move (Ishuah Kariuki) * Add --dump flag, introduce --dump requests, responses and remove --dump-auth, --dump-filters * Obscure X-Auth-Token: from headers when dumping too @@ -2086,7 +2086,7 @@ Point release to fix hubic and azureblob backends. * New features * Amazon Drive support * Oauth support redone - fix many bugs and improve usability - * Use "golang.org/x/oauth2" as oauth libary of choice + * Use "golang.org/x/oauth2" as oauth library of choice * Improve oauth usability for smoother initial signup * drive, googlecloudstorage: optionally use auto config for the oauth token * Implement --dump-headers and --dump-bodies debug flags diff --git a/docs/content/jottacloud.md b/docs/content/jottacloud.md index acb278625..4e151cda6 100644 --- a/docs/content/jottacloud.md +++ b/docs/content/jottacloud.md @@ -7,7 +7,7 @@ date: "2018-08-07" Jottacloud ----------------------------------------- -Jottacoud is a cloud storage service provider from a Norwegian company, using its own datacenters in Norway. +Jottacloud is a cloud storage service provider from a Norwegian company, using its own datacenters in Norway. 
In addition to the official service at [jottacloud.com](https://www.jottacloud.com/), there are also several whitelabel versions which should work with this backend. diff --git a/fs/accounting/accounting.go b/fs/accounting/accounting.go index 36d66810f..298b91243 100644 --- a/fs/accounting/accounting.go +++ b/fs/accounting/accounting.go @@ -359,7 +359,7 @@ func (acc *Account) progress() (bytes, size int64) { } // speed returns the speed of the current file transfer -// in bytes per second, as well a an exponentially weighted moving average +// in bytes per second, as well as an exponentially weighted moving average // If no read has completed yet, 0 is returned for both values. func (acc *Account) speed() (bps, current float64) { if acc == nil { diff --git a/fs/accounting/stats_groups.go b/fs/accounting/stats_groups.go index 02ce96afa..cffa4ac34 100644 --- a/fs/accounting/stats_groups.go +++ b/fs/accounting/stats_groups.go @@ -166,7 +166,7 @@ Returns the following values: "bytes": total transferred bytes for this file, "checked": if the transfer is only checked (skipped, deleted), "timestamp": integer representing millisecond unix epoch, - "error": string description of the error (empty if successfull), + "error": string description of the error (empty if successful), "jobid": id of the job that this transfer belongs to } ] diff --git a/fs/cache/cache.go b/fs/cache/cache.go index 8b39051c0..731477bd8 100644 --- a/fs/cache/cache.go +++ b/fs/cache/cache.go @@ -37,7 +37,7 @@ func addMapping(fsString, canonicalName string) { mu.Unlock() } -// GetFn gets a fs.Fs named fsString either from the cache or creates +// GetFn gets an fs.Fs named fsString either from the cache or creates // it afresh with the create function func GetFn(fsString string, create func(fsString string) (fs.Fs, error)) (f fs.Fs, err error) { fsString = canonicalize(fsString) @@ -77,7 +77,7 @@ func Unpin(f fs.Fs) { c.Pin(fs.ConfigString(f)) } -// Get gets a fs.Fs named fsString either from the cache or
creates it afresh +// Get gets an fs.Fs named fsString either from the cache or creates it afresh func Get(fsString string) (f fs.Fs, err error) { return GetFn(fsString, fs.NewFs) } @@ -89,7 +89,7 @@ func Put(fsString string, f fs.Fs) { addMapping(fsString, canonicalName) } -// Clear removes everything from the cahce +// Clear removes everything from the cache func Clear() { c.Clear() } diff --git a/fs/chunkedreader/chunkedreader.go b/fs/chunkedreader/chunkedreader.go index 4e0065947..313ffac14 100644 --- a/fs/chunkedreader/chunkedreader.go +++ b/fs/chunkedreader/chunkedreader.go @@ -19,7 +19,7 @@ var ( // ChunkedReader is a reader for a Object with the possibility // of reading the source in chunks of given size // -// A initialChunkSize of <= 0 will disable chunked reading. +// An initialChunkSize of <= 0 will disable chunked reading. type ChunkedReader struct { ctx context.Context mu sync.Mutex // protects following fields @@ -36,7 +36,7 @@ type ChunkedReader struct { // New returns a ChunkedReader for the Object. // -// A initialChunkSize of <= 0 will disable chunked reading. +// An initialChunkSize of <= 0 will disable chunked reading. // If maxChunkSize is greater than initialChunkSize, the chunk size will be // doubled after each chunk read with a maximun of maxChunkSize. 
// A Seek or RangeSeek will reset the chunk size to it's initial value diff --git a/fs/config.go b/fs/config.go index 2340f3acb..ba6bb2b1c 100644 --- a/fs/config.go +++ b/fs/config.go @@ -156,7 +156,7 @@ func NewConfig() *ConfigInfo { return c } -// ConfigToEnv converts an config section and name, eg ("myremote", +// ConfigToEnv converts a config section and name, eg ("myremote", // "ignore-size") into an environment name // "RCLONE_CONFIG_MYREMOTE_IGNORE_SIZE" func ConfigToEnv(section, name string) string { diff --git a/fs/filter/filter.go b/fs/filter/filter.go index c083f2349..fb84c496c 100644 --- a/fs/filter/filter.go +++ b/fs/filter/filter.go @@ -426,7 +426,7 @@ func (f *Filter) IncludeDirectory(ctx context.Context, fs fs.Fs) func(string) (b } // DirContainsExcludeFile checks if exclude file is present in a -// directroy. If fs is nil, it works properly if ExcludeFile is an +// directory. If fs is nil, it works properly if ExcludeFile is an // empty string (for testing). func (f *Filter) DirContainsExcludeFile(ctx context.Context, fremote fs.Fs, remote string) (bool, error) { if len(f.Opt.ExcludeFile) > 0 { diff --git a/fs/fs.go b/fs/fs.go index 93dc2e38c..0727c705f 100644 --- a/fs/fs.go +++ b/fs/fs.go @@ -1079,7 +1079,7 @@ type CommandHelp struct { Opts map[string]string // maps option name to a single line help } -// Commander is an iterface to wrap the Command function +// Commander is an interface to wrap the Command function type Commander interface { // Command the backend to run a named command // @@ -1137,7 +1137,7 @@ func UnWrapObject(o Object) Object { return o } -// Find looks for an RegInfo object for the name passed in. The name +// Find looks for a RegInfo object for the name passed in. The name // can be either the Name or the Prefix. 
// // Services are looked up in the config file diff --git a/fs/fserrors/error.go b/fs/fserrors/error.go index 5d0b85dac..76922e55c 100644 --- a/fs/fserrors/error.go +++ b/fs/fserrors/error.go @@ -360,7 +360,7 @@ func Cause(cause error) (retriable bool, err error) { } // retriableErrorStrings is a list of phrases which when we find it -// in an an error, we know it is a networking error which should be +// in an error, we know it is a networking error which should be // retried. // // This is incredibly ugly - if only errors.Cause worked for all diff --git a/fs/fshttp/http.go b/fs/fshttp/http.go index c72444998..a4e0ee54d 100644 --- a/fs/fshttp/http.go +++ b/fs/fshttp/http.go @@ -215,7 +215,7 @@ func NewClient(ci *fs.ConfigInfo) *http.Client { return client } -// Transport is a our http Transport which wraps an http.Transport +// Transport is our http Transport which wraps an http.Transport // * Sets the User Agent // * Does logging type Transport struct { diff --git a/fs/log/caller_hook.go b/fs/log/caller_hook.go index bebdb1b8c..7a72999e4 100644 --- a/fs/log/caller_hook.go +++ b/fs/log/caller_hook.go @@ -15,7 +15,7 @@ type CallerHook struct { levels []logrus.Level } -// NewCallerHook use to make an hook +// NewCallerHook is used to make a hook func NewCallerHook(levels ...logrus.Level) logrus.Hook { hook := CallerHook{ Field: "source", @@ -39,7 +39,7 @@ func (h *CallerHook) Fire(entry *logrus.Entry) error { return nil } -// findCaller ignores the caller relevent to logrus or fslog then find out the exact caller +// findCaller ignores the caller relevant to logrus or fslog then finds out the exact caller func findCaller(skip int) string { file := "" line := 0 diff --git a/fs/operations/rc.go b/fs/operations/rc.go index 346a557e0..291124902 100644 --- a/fs/operations/rc.go +++ b/fs/operations/rc.go @@ -418,7 +418,7 @@ command: rclone backend noop . 
-o echo=yes -o blue path1 path2 -Note that arguments must be preceeded by the "-a" flag +Note that arguments must be preceded by the "-a" flag See the [backend](/commands/rclone_backend/) command for more information. `, diff --git a/fs/operations/reopen_test.go b/fs/operations/reopen_test.go index 0faecb812..d7f7fbe94 100644 --- a/fs/operations/reopen_test.go +++ b/fs/operations/reopen_test.go @@ -19,7 +19,7 @@ var _ io.ReadCloser = (*ReOpen)(nil) var errorTestError = errors.New("test error") -// this is a wrapper for an mockobject with a custom Open function +// this is a wrapper for a mockobject with a custom Open function // // breaks indicate the number of bytes to read before returning an // error diff --git a/fs/options.go b/fs/options.go index 399b0a561..e8310045f 100644 --- a/fs/options.go +++ b/fs/options.go @@ -141,7 +141,7 @@ func (o *RangeOption) Decode(size int64) (offset, limit int64) { func FixRangeOption(options []OpenOption, size int64) { if size == 0 { // if size 0 then remove RangeOption~s - // replacing with an NullOptions~s which won't be rendered + // replacing with a NullOptions~s which won't be rendered for i := range options { if _, ok := options[i].(*RangeOption); ok { options[i] = NullOption{} diff --git a/fs/rc/cache.go b/fs/rc/cache.go index f3b0b7d7b..d821f5c09 100644 --- a/fs/rc/cache.go +++ b/fs/rc/cache.go @@ -7,7 +7,7 @@ import ( "github.com/rclone/rclone/fs/cache" ) -// GetFsNamed gets a fs.Fs named fsName either from the cache or creates it afresh +// GetFsNamed gets an fs.Fs named fsName either from the cache or creates it afresh func GetFsNamed(in Params, fsName string) (f fs.Fs, err error) { fsString, err := in.GetString(fsName) if err != nil { @@ -17,7 +17,7 @@ func GetFsNamed(in Params, fsName string) (f fs.Fs, err error) { return cache.Get(fsString) } -// GetFs gets a fs.Fs named "fs" either from the cache or creates it afresh +// GetFs gets an fs.Fs named "fs" either from the cache or creates it afresh func GetFs(in 
Params) (f fs.Fs, err error) { return GetFsNamed(in, "fs") } diff --git a/fs/rc/jobs/job.go b/fs/rc/jobs/job.go index 5e5909fe6..c5a423fac 100644 --- a/fs/rc/jobs/job.go +++ b/fs/rc/jobs/job.go @@ -16,7 +16,7 @@ import ( "github.com/rclone/rclone/fs/rc" ) -// Job describes a asynchronous task started via the rc package +// Job describes an asynchronous task started via the rc package type Job struct { mu sync.Mutex ID int64 `json:"id"` diff --git a/fs/rc/webgui.go b/fs/rc/webgui.go index 9dd4f44d7..9b09d5c14 100644 --- a/fs/rc/webgui.go +++ b/fs/rc/webgui.go @@ -202,7 +202,7 @@ func unzip(src, dest string) (err error) { return nil } -func exists(path string) (existance bool, stat os.FileInfo, err error) { +func exists(path string) (existence bool, stat os.FileInfo, err error) { stat, err = os.Stat(path) if err == nil { return true, stat, nil diff --git a/fs/sync/pipe.go b/fs/sync/pipe.go index 5e2b39b01..08b40c740 100644 --- a/fs/sync/pipe.go +++ b/fs/sync/pipe.go @@ -76,7 +76,7 @@ func (p *pipe) Pop() interface{} { return item } -// Put an pair into the pipe +// Put a pair into the pipe // // It returns ok = false if the context was cancelled // diff --git a/fstest/fstests/fstests.go b/fstest/fstests/fstests.go index d082df5f7..d8701ee61 100644 --- a/fstest/fstests/fstests.go +++ b/fstest/fstests/fstests.go @@ -616,7 +616,7 @@ func Run(t *testing.T, opt *Opt) { } }) - // TestFsNewObjectNotFound tests not finding a object + // TestFsNewObjectNotFound tests not finding an object t.Run("FsNewObjectNotFound", func(t *testing.T) { skipIfNotOk(t) // Object in an existing directory diff --git a/fstest/mockobject/mockobject.go b/fstest/mockobject/mockobject.go index a444d0617..dcf04792d 100644 --- a/fstest/mockobject/mockobject.go +++ b/fstest/mockobject/mockobject.go @@ -102,7 +102,7 @@ type ContentMockObject struct { unknownSize bool } -// WithContent returns a fs.Object with the given content. +// WithContent returns an fs.Object with the given content. 
func (o Object) WithContent(content []byte, mode SeekMode) *ContentMockObject { return &ContentMockObject{ Object: o, diff --git a/fstest/test_all/test_all.go b/fstest/test_all/test_all.go index a298cc100..b3ab7f35d 100644 --- a/fstest/test_all/test_all.go +++ b/fstest/test_all/test_all.go @@ -48,7 +48,7 @@ var ( // if matches then is definitely OK in the shell var shellOK = regexp.MustCompile("^[A-Za-z0-9./_:-]+$") -// converts a argv style input into a shell command +// converts an argv style input into a shell command func toShell(args []string) (result string) { for _, arg := range args { if result != "" { diff --git a/lib/atexit/atexit.go b/lib/atexit/atexit.go index 59b1a922e..1976d5a2a 100644 --- a/lib/atexit/atexit.go +++ b/lib/atexit/atexit.go @@ -57,7 +57,7 @@ func Unregister(handle FnHandle) { delete(fns, handle) } -// IgnoreSignals disables the signal handler and prevents Run from beeing executed automatically +// IgnoreSignals disables the signal handler and prevents Run from being executed automatically func IgnoreSignals() { registerOnce.Do(func() {}) if exitChan != nil { diff --git a/lib/bucket/bucket.go b/lib/bucket/bucket.go index 2ebc14d33..a9c63b83d 100644 --- a/lib/bucket/bucket.go +++ b/lib/bucket/bucket.go @@ -88,7 +88,7 @@ func (c *Cache) Create(bucket string, create CreateFn, exists ExistsFn) (err err c.mu.Lock() defer c.mu.Unlock() - // if have exists fuction and bucket has been deleted, check + // if have exists function and bucket has been deleted, check // it still exists if created, ok := c.status[bucket]; ok && !created && exists != nil { found, err := exists() diff --git a/lib/cache/cache.go b/lib/cache/cache.go index 7fa66392f..268ce4e88 100644 --- a/lib/cache/cache.go +++ b/lib/cache/cache.go @@ -95,7 +95,7 @@ func (c *Cache) Unpin(key string) { c.addPin(key, -1) } -// Put puts an value named key into the cache +// Put puts a value named key into the cache func (c *Cache) Put(key string, value interface{}) { c.mu.Lock() defer 
c.mu.Unlock() @@ -159,7 +159,7 @@ func (c *Cache) cacheExpire() { } } -// Clear removes everything from the cahce +// Clear removes everything from the cache func (c *Cache) Clear() { c.mu.Lock() for k := range c.cache { diff --git a/lib/encoder/encoder.go b/lib/encoder/encoder.go index 9d2019a99..29a82feba 100644 --- a/lib/encoder/encoder.go +++ b/lib/encoder/encoder.go @@ -2,7 +2,7 @@ Translate file names for usage on restrictive storage systems The restricted set of characters are mapped to a unicode equivalent version -(most to their FULLWIDTH variant) to increase compatability with other +(most to their FULLWIDTH variant) to increase compatibility with other storage systems. See: http://unicode-search.net/unicode-namesearch.pl?term=FULLWIDTH diff --git a/lib/encoder/internal/gen/main.go b/lib/encoder/internal/gen/main.go index 990e06616..f5ff57b26 100644 --- a/lib/encoder/internal/gen/main.go +++ b/lib/encoder/internal/gen/main.go @@ -600,7 +600,7 @@ func runePos(r rune, s []rune) int { return -1 } -// quotedToString returns a string for the chars slice where a encoder.QuoteRune is +// quotedToString returns a string for the chars slice where an encoder.QuoteRune is // inserted before a char[i] when quoted[i] is true. func quotedToString(chars []rune, quoted []bool) string { var out strings.Builder diff --git a/lib/readers/repeatable.go b/lib/readers/repeatable.go index bb308b3e7..e61b083d8 100644 --- a/lib/readers/repeatable.go +++ b/lib/readers/repeatable.go @@ -82,7 +82,7 @@ func NewRepeatableReaderSized(r io.Reader, size int) *RepeatableReader { } // NewRepeatableLimitReader create new repeatable reader from Reader r -// with an initial buffer of size wrapped in a io.LimitReader to read +// with an initial buffer of size wrapped in an io.LimitReader to read // only size. 
func NewRepeatableLimitReader(r io.Reader, size int) *RepeatableReader { return NewRepeatableReaderSized(io.LimitReader(r, int64(size)), size) @@ -98,7 +98,7 @@ func NewRepeatableReaderBuffer(r io.Reader, buf []byte) *RepeatableReader { } // NewRepeatableLimitReaderBuffer create new repeatable reader from -// Reader r and buf wrapped in a io.LimitReader to read only size. +// Reader r and buf wrapped in an io.LimitReader to read only size. func NewRepeatableLimitReaderBuffer(r io.Reader, buf []byte, size int64) *RepeatableReader { return NewRepeatableReaderBuffer(io.LimitReader(r, size), buf) } diff --git a/lib/readers/repeatable_test.go b/lib/readers/repeatable_test.go index 20c6e5680..d6a4c560b 100644 --- a/lib/readers/repeatable_test.go +++ b/lib/readers/repeatable_test.go @@ -90,7 +90,7 @@ func TestRepeatableReader(t *testing.T) { assert.Nil(t, err) require.Equal(t, 2, int(pos)) - // Should read from seek postion and past it + // Should read from seek position and past it dst = make([]byte, 5) n, err = io.ReadFull(r, dst) assert.Nil(t, err) diff --git a/lib/rest/rest.go b/lib/rest/rest.go index 92edc64fe..7ea7b6505 100644 --- a/lib/rest/rest.go +++ b/lib/rest/rest.go @@ -111,7 +111,7 @@ func (api *Client) SetUserPass(UserName, Password string) *Client { return api } -// SetCookie creates an Cookies Header for all requests with the supplied +// SetCookie creates a Cookies Header for all requests with the supplied // cookies passed in. 
// All cookies have to be supplied at once, all cookies will be overwritten // on a new call to the method @@ -407,7 +407,7 @@ func (api *Client) CallJSON(ctx context.Context, opts *Opts, request interface{} return api.callCodec(ctx, opts, request, response, json.Marshal, DecodeJSON, "application/json") } -// CallXML runs Call and decodes the body as a XML object into response (if not nil) +// CallXML runs Call and decodes the body as an XML object into response (if not nil) // // If request is not nil then it will be XML encoded as the body of the request // diff --git a/vfs/file.go b/vfs/file.go index 41ffc6784..3f9583ab6 100644 --- a/vfs/file.go +++ b/vfs/file.go @@ -54,7 +54,7 @@ type File struct { appendMode bool // file was opened with O_APPEND sys interface{} // user defined info to be attached here - muRW sync.Mutex // synchonize RWFileHandle.openPending(), RWFileHandle.close() and File.Remove + muRW sync.Mutex // synchronize RWFileHandle.openPending(), RWFileHandle.close() and File.Remove } // newFile creates a new File diff --git a/vfs/file_test.go b/vfs/file_test.go index f74788358..4de96273e 100644 --- a/vfs/file_test.go +++ b/vfs/file_test.go @@ -300,7 +300,7 @@ func testFileRename(t *testing.T, mode vfscommon.CacheMode) { } // now try renaming it with the file open - // first open it and write to it but dont close it + // first open it and write to it but don't close it fd, err := file.Open(os.O_WRONLY | os.O_TRUNC) require.NoError(t, err) newContents := []byte("this is some new contents") diff --git a/vfs/read.go b/vfs/read.go index c44d9118f..532728b8a 100644 --- a/vfs/read.go +++ b/vfs/read.go @@ -117,7 +117,7 @@ func (fh *ReadFileHandle) seek(offset int64, reopen bool) (err error) { fh.hash = nil if !reopen { ar := fh.r.GetAsyncReader() - // try to fullfill the seek with buffer discard + // try to fulfill the seek with buffer discard if ar != nil && ar.SkipBytes(int(offset-fh.offset)) { fh.offset = offset return nil @@ -252,7 +252,7 @@ func 
waitSequential(what string, remote string, cond *sync.Cond, maxWait time.Du // Implementation of ReadAt - call with lock held func (fh *ReadFileHandle) readAt(p []byte, off int64) (n int, err error) { // defer log.Trace(fh.remote, "p[%d], off=%d", len(p), off)("n=%d, err=%v", &n, &err) - err = fh.openPending() // FIXME pending open could be more efficient in the presense of seek (and retries) + err = fh.openPending() // FIXME pending open could be more efficient in the presence of seek (and retries) if err != nil { return 0, err } diff --git a/vfs/read_write.go b/vfs/read_write.go index 381f84471..8f6e2f39c 100644 --- a/vfs/read_write.go +++ b/vfs/read_write.go @@ -105,7 +105,7 @@ func (fh *RWFileHandle) openPending(truncate bool) (err error) { } } - // try to open a exising cache file + // try to open an existing cache file fd, err = file.OpenFile(fh.file.osPath(), cacheFileOpenFlags&^os.O_CREATE, 0600) if os.IsNotExist(err) { // cache file does not exist, so need to fetch it if we have an object to fetch @@ -151,7 +151,7 @@ func (fh *RWFileHandle) openPending(truncate bool) (err error) { } } // Windows doesn't seem to deal well with O_TRUNC and - // certain access modes so so truncate the file if it + // certain access modes so truncate the file if it // exists in these cases. 
if runtime.GOOS == "windows" && fh.flags&os.O_APPEND != 0 { cacheFileOpenFlags &^= os.O_TRUNC diff --git a/vfs/vfscache/vfscache_test.go b/vfs/vfscache/vfscache_test.go index 771d5fb64..1980871a7 100644 --- a/vfs/vfscache/vfscache_test.go +++ b/vfs/vfscache/vfscache_test.go @@ -162,7 +162,7 @@ func TestCacheNew(t *testing.T) { // try purging with file closed c.purgeOld(10 * time.Second) - // ...nothing should happend + // ...nothing should happen _, err = os.Stat(p) assert.NoError(t, err) diff --git a/vfs/vfscommon/options.go b/vfs/vfscommon/options.go index 4ac2af361..24c99b0cd 100644 --- a/vfs/vfscommon/options.go +++ b/vfs/vfscommon/options.go @@ -42,7 +42,7 @@ var DefaultOpt = Options{ ReadOnly: false, Umask: 0, UID: ^uint32(0), // these values instruct WinFSP-FUSE to use the current user - GID: ^uint32(0), // overriden for non windows in mount_unix.go + GID: ^uint32(0), // overridden for non windows in mount_unix.go DirPerms: os.FileMode(0777), FilePerms: os.FileMode(0666), CacheMode: CacheModeOff, diff --git a/vfs/write.go b/vfs/write.go index dd45fdcfd..e845a3f24 100644 --- a/vfs/write.go +++ b/vfs/write.go @@ -192,7 +192,7 @@ func (fh *WriteFileHandle) close() (err error) { fh.file.delWriter(fh, false) fh.file.finishWriterClose() }() - // If file not opened and not safe to truncate then then leave file intact + // If file not opened and not safe to truncate then leave file intact if !fh.opened && !fh.safeToTruncate() { return nil }