From 041b201abddd66e3ce8eea66e2779a0315cbae28 Mon Sep 17 00:00:00 2001
From: Martin Michlmayr
Date: Mon, 25 May 2020 14:05:53 +0800
Subject: [PATCH] doc: fix typos throughout docs and code

---
 MAINTAINERS.md | 2 +-
 backend/azureblob/azureblob.go | 2 +-
 backend/cache/cache.go | 4 ++--
 backend/crypt/cipher.go | 2 +-
 backend/drive/drive.go | 4 ++--
 backend/fichier/fichier.go | 2 +-
 backend/fichier/object.go | 2 +-
 backend/jottacloud/jottacloud.go | 2 +-
 backend/koofr/koofr.go | 4 ++--
 backend/local/local.go | 2 +-
 backend/mega/mega.go | 2 +-
 backend/opendrive/opendrive.go | 2 +-
 backend/qingstor/upload.go | 2 +-
 backend/seafile/object.go | 2 +-
 backend/seafile/seafile.go | 2 +-
 backend/seafile/webapi.go | 2 +-
 backend/sftp/sftp.go | 2 +-
 backend/sugarsync/sugarsync.go | 2 +-
 backend/tardigrade/fs.go | 2 +-
 backend/tardigrade/object.go | 2 +-
 backend/union/entry.go | 2 +-
 backend/union/union.go | 2 +-
 backend/union/upstream/upstream.go | 2 +-
 backend/webdav/api/types.go | 2 +-
 bin/check-merged.go | 2 +-
 cmd/backend/backend.go | 2 +-
 cmd/cmd.go | 4 ++--
 cmd/config/config.go | 4 ++--
 cmd/mountlib/mount.go | 2 +-
 docs/content/swift.md | 2 +-
 fs/accounting/accounting.go | 2 +-
 fs/chunkedreader/chunkedreader.go | 2 +-
 fs/fs.go | 8 ++++----
 fs/operations/operations.go | 6 +++---
 fs/rc/params.go | 2 +-
 fs/sync/sync.go | 2 +-
 lib/encoder/encoder.go | 2 +-
 lib/pacer/pacer.go | 4 ++--
 vfs/file.go | 4 ++--
 39 files changed, 51 insertions(+), 51 deletions(-)

diff --git a/MAINTAINERS.md b/MAINTAINERS.md
index 9e5a06720..f2c8fe7e6 100644
--- a/MAINTAINERS.md
+++ b/MAINTAINERS.md
@@ -33,7 +33,7 @@ Rclone uses the labels like this:
 * `duplicate` - normally close these and ask the user to subscribe to the original
 * `enhancement: new remote` - a new rclone backend
 * `enhancement` - a new feature
-* `FUSE` - do do with `rclone mount` command
+* `FUSE` - to do with `rclone mount` command
 * `good first issue` - mark these if you find a small self contained issue - these get shown to new visitors to the project
 * `help` wanted - mark these if you find a self contained issue - these get shown to new visitors to the project
 * `IMPORTANT` - note to maintainers not to forget to fix this for the release
diff --git a/backend/azureblob/azureblob.go b/backend/azureblob/azureblob.go
index ea760e0b5..acf9190cc 100644
--- a/backend/azureblob/azureblob.go
+++ b/backend/azureblob/azureblob.go
@@ -1455,7 +1455,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 httpHeaders.ContentType = fs.MimeType(ctx, o)
 // Compute the Content-MD5 of the file, for multiparts uploads it
 // will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
- // Note: If multipart, a MD5 checksum will also be computed for each uploaded block
+ // Note: If multipart, an MD5 checksum will also be computed for each uploaded block
 // in order to validate its integrity during transport
 if !o.fs.opt.DisableCheckSum {
 if sourceMD5, _ := src.Hash(ctx, hash.MD5); sourceMD5 != "" {
diff --git a/backend/cache/cache.go b/backend/cache/cache.go
index 98ebe79b0..1a7332721 100644
--- a/backend/cache/cache.go
+++ b/backend/cache/cache.go
@@ -87,7 +87,7 @@ func init() {
 Advanced: true,
 }, {
 Name: "plex_insecure",
- Help: "Skip all certificate verifications when connecting to the Plex server",
+ Help: "Skip all certificate verification when connecting to the Plex server",
 Advanced: true,
 }, {
 Name: "chunk_size",
@@ -339,7 +339,7 @@ func parseRootPath(path string) (string, error) {
 return strings.Trim(path, "/"), nil
 }
 
-// NewFs constructs a Fs from the path, container:path +// NewFs constructs an Fs from the path, container:path func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) { // Parse config into Options struct opt := new(Options) diff --git a/backend/crypt/cipher.go b/backend/crypt/cipher.go index 810225fd5..94f499781 100644 --- a/backend/crypt/cipher.go +++ b/backend/crypt/cipher.go @@ -559,7 +559,7 @@ func (n *nonce) increment() { n.carry(0) } -// add an uint64 to the nonce +// add a uint64 to the nonce func (n *nonce) add(x uint64) { carry := uint16(0) for i := 0; i < 8; i++ { diff --git a/backend/drive/drive.go b/backend/drive/drive.go index edc5df5da..0a9066060 100755 --- a/backend/drive/drive.go +++ b/backend/drive/drive.go @@ -263,7 +263,7 @@ videos. Setting this flag will cause Google photos and videos to return a blank MD5 checksum. -Google photos are identifed by being in the "photos" space. +Google photos are identified by being in the "photos" space. Corrupted checksums are caused by Google modifying the image/video but not updating the checksum.`, @@ -2074,7 +2074,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error { } dstDir := dirs[0] for _, srcDir := range dirs[1:] { - // list the the objects + // list the objects infos := []*drive.File{} _, err := f.list(ctx, []string{srcDir.ID()}, "", false, false, true, func(info *drive.File) bool { infos = append(infos, info) diff --git a/backend/fichier/fichier.go b/backend/fichier/fichier.go index 0349a2198..26dba8340 100644 --- a/backend/fichier/fichier.go +++ b/backend/fichier/fichier.go @@ -298,7 +298,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { // Put in to the remote path with the modTime given of the given size // -// When called from outside a Fs by rclone, src.Size() will always be >= 0. +// When called from outside an Fs by rclone, src.Size() will always be >= 0. // But for unknown-sized objects (indicated by src.Size() == -1), Put should either // return an error or upload it properly (rather than e.g. calling panic). // diff --git a/backend/fichier/object.go b/backend/fichier/object.go index 7a152b9ab..95c4fe783 100644 --- a/backend/fichier/object.go +++ b/backend/fichier/object.go @@ -101,7 +101,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo // Update in to the object with the modTime given of the given size // -// When called from outside a Fs by rclone, src.Size() will always be >= 0. +// When called from outside an Fs by rclone, src.Size() will always be >= 0. // But for unknown-sized objects (indicated by src.Size() == -1), Upload should either // return an error or update the object properly (rather than e.g. calling panic). func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { diff --git a/backend/jottacloud/jottacloud.go b/backend/jottacloud/jottacloud.go index 3941fadcd..1103c3926 100644 --- a/backend/jottacloud/jottacloud.go +++ b/backend/jottacloud/jottacloud.go @@ -1210,7 +1210,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read // The cleanup function should be called when out is finished with // regardless of whether this function returned an error or not. 
 func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader, cleanup func(), err error) {
- // we need a MD5
+ // we need an MD5
 md5Hasher := md5.New()
 // use the teeReader to write to the local file AND calculate the MD5 while doing so
 teeReader := io.TeeReader(in, md5Hasher)
diff --git a/backend/koofr/koofr.go b/backend/koofr/koofr.go
index 3d218569f..32b7374ca 100644
--- a/backend/koofr/koofr.go
+++ b/backend/koofr/koofr.go
@@ -79,7 +79,7 @@ type Options struct {
 Enc encoder.MultiEncoder `config:"encoding"`
 }
 
-// A Fs is a representation of a remote Koofr Fs
+// An Fs is a representation of a remote Koofr Fs
 type Fs struct {
 name string
 mountID string
@@ -250,7 +250,7 @@ func (f *Fs) Hashes() hash.Set {
 return hash.Set(hash.MD5)
 }
 
-// fullPath constructs a full, absolute path from a Fs root relative path,
+// fullPath constructs a full, absolute path from an Fs root relative path,
 func (f *Fs) fullPath(part string) string {
 return f.opt.Enc.FromStandardPath(path.Join("/", f.root, part))
 }
diff --git a/backend/local/local.go b/backend/local/local.go
index 86de84fa4..44a7fa7a4 100644
--- a/backend/local/local.go
+++ b/backend/local/local.go
@@ -1164,7 +1164,7 @@ func (o *Object) setMetadata(info os.FileInfo) {
 }
 }
 
-// Stat a Object into info
+// Stat an Object into info
 func (o *Object) lstat() error {
 info, err := o.fs.lstat(o.path)
 if err == nil {
diff --git a/backend/mega/mega.go b/backend/mega/mega.go
index 784a2a039..925bd3b59 100644
--- a/backend/mega/mega.go
+++ b/backend/mega/mega.go
@@ -871,7 +871,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
 return errors.Errorf("MergeDirs failed to find node for: %v", srcDir)
 }
 
- // list the the objects
+ // list the objects
 infos := []*mega.Node{}
 _, err := f.list(ctx, srcDirNode, func(info *mega.Node) bool {
 infos = append(infos, info)
diff --git a/backend/opendrive/opendrive.go b/backend/opendrive/opendrive.go
index 3ab0a7606..6e89bcca6 100644
--- a/backend/opendrive/opendrive.go
+++ b/backend/opendrive/opendrive.go
@@ -63,7 +63,7 @@ func init() {
 // ? (question mark) -> '?' // FULLWIDTH QUESTION MARK
 // * (asterisk) -> '*' // FULLWIDTH ASTERISK
 //
- // Additionally names can't begin or end with a ASCII whitespace.
+ // Additionally names can't begin or end with an ASCII whitespace.
 // List of replaced characters:
 // (space) -> '␠' // SYMBOL FOR SPACE
 // (horizontal tab) -> '␉' // SYMBOL FOR HORIZONTAL TABULATION
diff --git a/backend/qingstor/upload.go b/backend/qingstor/upload.go
index 7fb2e2b15..fb9202106 100644
--- a/backend/qingstor/upload.go
+++ b/backend/qingstor/upload.go
@@ -255,7 +255,7 @@ func (mu *multiUploader) readChunk(ch chan chunk) {
 }
 }
 
-// initiate init an Multiple Object and obtain UploadID
+// initiate init a Multiple Object and obtain UploadID
 func (mu *multiUploader) initiate() error {
 bucketInit, _ := mu.bucketInit()
 req := qs.InitiateMultipartUploadInput{
diff --git a/backend/seafile/object.go b/backend/seafile/object.go
index f214331f5..25257825a 100644
--- a/backend/seafile/object.go
+++ b/backend/seafile/object.go
@@ -85,7 +85,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
 
 // Update in to the object with the modTime given of the given size
 //
-// When called from outside a Fs by rclone, src.Size() will always be >= 0.
+// When called from outside an Fs by rclone, src.Size() will always be >= 0.
 // But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
 // return an error or update the object properly (rather than e.g. calling panic).
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
diff --git a/backend/seafile/seafile.go b/backend/seafile/seafile.go
index a4f5f2a87..75ab5afad 100644
--- a/backend/seafile/seafile.go
+++ b/backend/seafile/seafile.go
@@ -528,7 +528,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 
 // Put in to the remote path with the modTime given of the given size
 //
-// When called from outside a Fs by rclone, src.Size() will always be >= 0.
+// When called from outside an Fs by rclone, src.Size() will always be >= 0.
 // But for unknown-sized objects (indicated by src.Size() == -1), Put should either
 // return an error or upload it properly (rather than e.g. calling panic).
 //
diff --git a/backend/seafile/webapi.go b/backend/seafile/webapi.go
index bf9b43af2..8c067ac6c 100644
--- a/backend/seafile/webapi.go
+++ b/backend/seafile/webapi.go
@@ -35,7 +35,7 @@ func (f *Fs) getAuthorizationToken(ctx context.Context) (string, error) {
 return getAuthorizationToken(ctx, f.srv, f.opt.User, f.opt.Password, "")
 }
 
-// getAuthorizationToken can be called outside of a fs (during configuration of the remote to get the authentication token)
+// getAuthorizationToken can be called outside of an fs (during configuration of the remote to get the authentication token)
 // it's doing a single call (no pacer involved)
 func getAuthorizationToken(ctx context.Context, srv *rest.Client, user, password, oneTimeCode string) (string, error) {
 // API Documentation
diff --git a/backend/sftp/sftp.go b/backend/sftp/sftp.go
index f67852af1..c3020d10c 100644
--- a/backend/sftp/sftp.go
+++ b/backend/sftp/sftp.go
@@ -94,7 +94,7 @@ when the ssh-agent contains many keys.`,
 Name: "use_insecure_cipher",
 Help: `Enable the use of insecure ciphers and key exchange methods.
 
-This enables the use of the the following insecure ciphers and key exchange methods:
+This enables the use of the following insecure ciphers and key exchange methods:
 
 - aes128-cbc
 - aes192-cbc
diff --git a/backend/sugarsync/sugarsync.go b/backend/sugarsync/sugarsync.go
index ae306caae..7507de3ec 100644
--- a/backend/sugarsync/sugarsync.go
+++ b/backend/sugarsync/sugarsync.go
@@ -230,7 +230,7 @@ func (f *Fs) Features() *fs.Features {
 return f.features
 }
 
-// parsePath parses an sugarsync 'url'
+// parsePath parses a sugarsync 'url'
 func parsePath(path string) (root string) {
 root = strings.Trim(path, "/")
 return
diff --git a/backend/tardigrade/fs.go b/backend/tardigrade/fs.go
index 05819a8a8..1a08f5cba 100644
--- a/backend/tardigrade/fs.go
+++ b/backend/tardigrade/fs.go
@@ -518,7 +518,7 @@ func (f *Fs) NewObject(ctx context.Context, relative string) (_ fs.Object, err e
 
 // Put in to the remote path with the modTime given of the given size
 //
-// When called from outside a Fs by rclone, src.Size() will always be >= 0.
+// When called from outside an Fs by rclone, src.Size() will always be >= 0.
 // But for unknown-sized objects (indicated by src.Size() == -1), Put should
 // either return an error or upload it properly (rather than e.g. calling
 // panic).
diff --git a/backend/tardigrade/object.go b/backend/tardigrade/object.go
index 0b3803e3c..f7b31d78f 100644
--- a/backend/tardigrade/object.go
+++ b/backend/tardigrade/object.go
@@ -177,7 +177,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (_ io.ReadC
 
 // Update in to the object with the modTime given of the given size
 //
-// When called from outside a Fs by rclone, src.Size() will always be >= 0.
+// When called from outside an Fs by rclone, src.Size() will always be >= 0.
 // But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
 // return an error or update the object properly (rather than e.g. calling panic).
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
diff --git a/backend/union/entry.go b/backend/union/entry.go
index 232241f28..2f5b38b0d 100644
--- a/backend/union/entry.go
+++ b/backend/union/entry.go
@@ -55,7 +55,7 @@ func (d *Directory) candidates() []upstream.Entry {
 
 // Update in to the object with the modTime given of the given size
 //
-// When called from outside a Fs by rclone, src.Size() will always be >= 0.
+// When called from outside an Fs by rclone, src.Size() will always be >= 0.
 // But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
 // return an error or update the object properly (rather than e.g. calling panic).
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
diff --git a/backend/union/union.go b/backend/union/union.go
index b449097a5..3ce07d756 100644
--- a/backend/union/union.go
+++ b/backend/union/union.go
@@ -80,7 +80,7 @@ type Fs struct {
 searchPolicy policy.Policy // policy for SEARCH
 }
 
-// Wrap candidate objects in to an union Object
+// Wrap candidate objects in to a union Object
 func (f *Fs) wrapEntries(entries ...upstream.Entry) (entry, error) {
 e, err := f.searchEntries(entries...)
 if err != nil {
diff --git a/backend/union/upstream/upstream.go b/backend/union/upstream/upstream.go
index 30cb1f983..2a58048e8 100644
--- a/backend/union/upstream/upstream.go
+++ b/backend/union/upstream/upstream.go
@@ -219,7 +219,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
 
 // Update in to the object with the modTime given of the given size
 //
-// When called from outside a Fs by rclone, src.Size() will always be >= 0.
+// When called from outside an Fs by rclone, src.Size() will always be >= 0.
 // But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
 // return an error or update the object properly (rather than e.g. calling panic).
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
diff --git a/backend/webdav/api/types.go b/backend/webdav/api/types.go
index 376854f05..89f207327 100644
--- a/backend/webdav/api/types.go
+++ b/backend/webdav/api/types.go
@@ -155,7 +155,7 @@ func (e *Error) Error() string {
 return strings.Join(out, ": ")
 }
 
-// Time represents represents date and time information for the
+// Time represents date and time information for the
 // webdav API marshalling to and from timeFormat
 type Time time.Time
 
diff --git a/bin/check-merged.go b/bin/check-merged.go
index ad68269b7..555976cbb 100644
--- a/bin/check-merged.go
+++ b/bin/check-merged.go
@@ -55,7 +55,7 @@ func gitDiffDiff(rev1, rev2 string) {
 if errors.As(err, &exitErr) && exitErr.ExitCode() == 1 {
 // OK just different
 } else {
- log.Fatalf("git diff diff failed: %#v", err)
+ log.Fatalf("git diff failed: %#v", err)
 }
 }
 _, _ = os.Stdout.Write(out)
diff --git a/cmd/backend/backend.go b/cmd/backend/backend.go
index 12c5a3f33..af270061b 100644
--- a/cmd/backend/backend.go
+++ b/cmd/backend/backend.go
@@ -140,7 +140,7 @@ func showHelp(fsInfo *fs.RegInfo) error {
 fmt.Printf("### Backend commands\n\n")
 fmt.Printf(`Here are the commands specific to the %s backend.
 
-Run them with with
+Run them with
 
 rclone backend COMMAND remote:
 
diff --git a/cmd/cmd.go b/cmd/cmd.go
index 5f1729401..b8044fe63 100644
--- a/cmd/cmd.go
+++ b/cmd/cmd.go
@@ -76,7 +76,7 @@ func ShowVersion() {
 fmt.Printf("- go version: %s\n", runtime.Version())
 }
 
-// NewFsFile creates a Fs from a name but may point to a file.
+// NewFsFile creates an Fs from a name but may point to a file.
 //
 // It returns a string with the file name if points to a file
 // otherwise "".
@@ -99,7 +99,7 @@ func NewFsFile(remote string) (fs.Fs, string) {
 return nil, ""
 }
 
-// newFsFileAddFilter creates a src Fs from a name
+// newFsFileAddFilter creates an src Fs from a name
 //
 // This works the same as NewFsFile however it adds filters to the Fs
 // to limit it to a single file if the remote pointed to a file.
diff --git a/cmd/config/config.go b/cmd/config/config.go
index 64450a6ee..75646e071 100644
--- a/cmd/config/config.go
+++ b/cmd/config/config.go
@@ -120,7 +120,7 @@ var configCreateCommand = &cobra.Command{
 Short: `Create a new remote with name, type and options.`,
 Long: `
 Create a new remote of with and options. The options
-should be passed in in pairs of .
+should be passed in pairs of .
 
 For example to make a swift remote of name myremote using auto config you would do:
 
@@ -204,7 +204,7 @@ var configPasswordCommand = &cobra.Command{
 Short: `Update password in an existing remote.`,
 Long: `
 Update an existing remote's password. The password
-should be passed in in pairs of .
+should be passed in pairs of .
 
 For example to set password of a remote of name myremote you would do:
 
diff --git a/cmd/mountlib/mount.go b/cmd/mountlib/mount.go
index f5ea13c02..9e742c266 100644
--- a/cmd/mountlib/mount.go
+++ b/cmd/mountlib/mount.go
@@ -385,7 +385,7 @@ be copied to the vfs cache before opening with --vfs-cache-mode full.
 return commandDefinition
 }
 
-// ClipBlocks clips the blocks pointed to to the OS max
+// ClipBlocks clips the blocks pointed to the OS max
 func ClipBlocks(b *uint64) {
 var max uint64
 switch runtime.GOOS {
diff --git a/docs/content/swift.md b/docs/content/swift.md
index 94fb398e8..d1fad6ab6 100644
--- a/docs/content/swift.md
+++ b/docs/content/swift.md
@@ -513,7 +513,7 @@ investigate further with the `--dump-bodies` flag.
 
 This may also be caused by specifying the region when you shouldn't have (eg OVH).
 
-#### Rclone gives Failed to create file system: Response didn't have storage storage url and auth token ####
+#### Rclone gives Failed to create file system: Response didn't have storage url and auth token ####
 
 This is most likely caused by forgetting to specify your tenant when setting up a swift remote.
 
diff --git a/fs/accounting/accounting.go b/fs/accounting/accounting.go
index 298b91243..0c6c0a50c 100644
--- a/fs/accounting/accounting.go
+++ b/fs/accounting/accounting.go
@@ -58,7 +58,7 @@ type accountValues struct {
 
 const averagePeriod = 16 // period to do exponentially weighted averages over
 
-// newAccountSizeName makes a Account reader for an io.ReadCloser of
+// newAccountSizeName makes an Account reader for an io.ReadCloser of
 // the given size and name
 func newAccountSizeName(stats *StatsInfo, in io.ReadCloser, size int64, name string) *Account {
 acc := &Account{
diff --git a/fs/chunkedreader/chunkedreader.go b/fs/chunkedreader/chunkedreader.go
index 313ffac14..7cad4caa9 100644
--- a/fs/chunkedreader/chunkedreader.go
+++ b/fs/chunkedreader/chunkedreader.go
@@ -16,7 +16,7 @@ var (
 ErrorInvalidSeek = errors.New("invalid seek position")
 )
 
-// ChunkedReader is a reader for a Object with the possibility
+// ChunkedReader is a reader for an Object with the possibility
 // of reading the source in chunks of given size
 //
 // An initialChunkSize of <= 0 will disable chunked reading.
diff --git a/fs/fs.go b/fs/fs.go
index 0727c705f..6fd55099c 100644
--- a/fs/fs.go
+++ b/fs/fs.go
@@ -82,7 +82,7 @@ type RegInfo struct {
 // Prefix for command line flags for this fs - defaults to Name if not set
 Prefix string
 // Create a new file system. If root refers to an existing
- // object, then it should return a Fs which which points to
+ // object, then it should return an Fs which which points to
 // the parent of that object and ErrorIsFile.
 NewFs func(name string, root string, config configmap.Mapper) (Fs, error) `json:"-"`
 // Function to call to help with config
@@ -191,7 +191,7 @@ func (o *Option) String() string {
 return fmt.Sprint(o.GetValue())
 }
 
-// Set a Option from a string
+// Set an Option from a string
 func (o *Option) Set(s string) (err error) {
 newValue, err := configstruct.StringToInterface(o.GetValue(), s)
 if err != nil {
@@ -274,7 +274,7 @@ type Fs interface {
 
 // Put in to the remote path with the modTime given of the given size
 //
- // When called from outside a Fs by rclone, src.Size() will always be >= 0.
+ // When called from outside an Fs by rclone, src.Size() will always be >= 0.
 // But for unknown-sized objects (indicated by src.Size() == -1), Put should either
 // return an error or upload it properly (rather than e.g. calling panic).
 //
@@ -327,7 +327,7 @@ type Object interface {
 
 // Update in to the object with the modTime given of the given size
 //
- // When called from outside a Fs by rclone, src.Size() will always be >= 0.
+ // When called from outside an Fs by rclone, src.Size() will always be >= 0.
 // But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
 // return an error or update the object properly (rather than e.g. calling panic).
 Update(ctx context.Context, in io.Reader, src ObjectInfo, options ...OpenOption) error
diff --git a/fs/operations/operations.go b/fs/operations/operations.go
index 813e2b0e8..adf32e072 100644
--- a/fs/operations/operations.go
+++ b/fs/operations/operations.go
@@ -748,7 +748,7 @@ func checkIdentical(ctx context.Context, dst, src fs.Object) (differ bool, noHas
 return false, false
 }
 
-// checkFn is the the type of the checking function used in CheckFn()
+// checkFn is the type of the checking function used in CheckFn()
 type checkFn func(ctx context.Context, a, b fs.Object) (differ bool, noHash bool)
 
 // checkMarch is used to march over two Fses in the same way as
@@ -1080,7 +1080,7 @@ func hashSum(ctx context.Context, ht hash.Type, o fs.Object) (string, error) {
 return sum, err
 }
 
-// HashLister does a md5sum equivalent for the hash type passed in
+// HashLister does an md5sum equivalent for the hash type passed in
 func HashLister(ctx context.Context, ht hash.Type, f fs.Fs, w io.Writer) error {
 return ListFn(ctx, f, func(o fs.Object) {
 sum, _ := hashSum(ctx, ht, o)
@@ -1088,7 +1088,7 @@ func HashLister(ctx context.Context, ht hash.Type, f fs.Fs, w io.Writer) error {
 })
 }
 
-// HashListerBase64 does a md5sum equivalent for the hash type passed in with base64 encoded
+// HashListerBase64 does an md5sum equivalent for the hash type passed in with base64 encoded
 func HashListerBase64(ctx context.Context, ht hash.Type, f fs.Fs, w io.Writer) error {
 return ListFn(ctx, f, func(o fs.Object) {
 sum, err := hashSum(ctx, ht, o)
diff --git a/fs/rc/params.go b/fs/rc/params.go
index 306786fda..cf8bf95d3 100644
--- a/fs/rc/params.go
+++ b/fs/rc/params.go
@@ -103,7 +103,7 @@ func (p Params) GetString(key string) (string, error) {
 return str, nil
 }
 
-// GetInt64 gets a int64 parameter from the input
+// GetInt64 gets an int64 parameter from the input
 //
 // If the parameter isn't found then error will be of type
 // ErrParamNotFound and the returned value will be 0.
diff --git a/fs/sync/sync.go b/fs/sync/sync.go
index bc579fee6..5d3eb0cd1 100644
--- a/fs/sync/sync.go
+++ b/fs/sync/sync.go
@@ -732,7 +732,7 @@ func (s *syncCopyMove) makeRenameMap() {
 fs.Infof(s.fdst, "Finished making map for --track-renames")
 }
 
-// tryRename renames a src object when doing track renames if
+// tryRename renames an src object when doing track renames if
 // possible, it returns true if the object was renamed.
 func (s *syncCopyMove) tryRename(src fs.Object) bool {
 // Calculate the hash of the src object
diff --git a/lib/encoder/encoder.go b/lib/encoder/encoder.go
index 29a82feba..f69878367 100644
--- a/lib/encoder/encoder.go
+++ b/lib/encoder/encoder.go
@@ -1095,7 +1095,7 @@ func (i identity) ToStandardName(s string) string {
 return ToStandardName(i, s)
 }
 
-// Identity returns a Encoder that always returns the input value
+// Identity returns an Encoder that always returns the input value
 func Identity() Encoder {
 return identity{}
 }
diff --git a/lib/pacer/pacer.go b/lib/pacer/pacer.go
index 71aea4b9d..dffa803f9 100644
--- a/lib/pacer/pacer.go
+++ b/lib/pacer/pacer.go
@@ -61,7 +61,7 @@ func MaxConnectionsOption(maxConnections int) Option {
 return func(p *pacerOptions) { p.maxConnections = maxConnections }
 }
 
-// InvokerOption sets a InvokerFunc for the new Pacer.
+// InvokerOption sets an InvokerFunc for the new Pacer.
 func InvokerOption(invoker InvokerFunc) Option {
 return func(p *pacerOptions) { p.invoker = invoker }
 }
@@ -250,7 +250,7 @@ func RetryAfterError(err error, retryAfter time.Duration) error {
 }
 }
 
-// IsRetryAfter returns true if the the error or any of it's Cause's is an error
+// IsRetryAfter returns true if the error or any of it's Cause's is an error
 // returned by RetryAfterError. It also returns the associated Duration if possible.
 func IsRetryAfter(err error) (retryAfter time.Duration, isRetryAfter bool) {
 errors.Walk(err, func(err error) bool {
diff --git a/vfs/file.go b/vfs/file.go
index 3f9583ab6..2d1a3c59e 100644
--- a/vfs/file.go
+++ b/vfs/file.go
@@ -47,8 +47,8 @@ type File struct {
 writers []Handle // writers for this file
 nwriters int32 // len(writers) which is read/updated with atomic
 readWriters int // how many RWFileHandle are open for writing
- readWriterClosing bool // is a RWFileHandle currently cosing?
- modified bool // has the cache file be modified by a RWFileHandle?
+ readWriterClosing bool // is an RWFileHandle currently cosing?
+ modified bool // has the cache file be modified by an RWFileHandle?
 pendingModTime time.Time // will be applied once o becomes available, i.e. after file was written
 pendingRenameFun func(ctx context.Context) error // will be run/renamed after all writers close
 appendMode bool // file was opened with O_APPEND