From fdd2f8e6d21d1cf95b33fd2bb09f9c1caef0069d Mon Sep 17 00:00:00 2001 From: albertony <12441419+albertony@users.noreply.github.com> Date: Wed, 8 Jun 2022 22:54:39 +0200 Subject: [PATCH] Error strings should not be capitalized Reported by staticcheck 2022.1.2 (v0.3.2) See: staticcheck.io --- backend/azureblob/azureblob.go | 26 +++++++++++++------------- backend/azureblob/imds.go | 4 ++-- backend/cache/cache.go | 2 +- backend/cache/plex.go | 2 +- backend/cache/storage_persistent.go | 14 +++++++------- backend/compress/compress.go | 16 ++++++++-------- backend/crypt/cipher.go | 4 ++-- backend/crypt/crypt.go | 2 +- backend/crypt/pkcs7/pkcs7.go | 10 +++++----- backend/drive/drive.go | 8 ++++---- backend/dropbox/dropbox.go | 4 ++-- backend/fichier/api.go | 4 ++-- backend/fichier/fichier.go | 2 +- backend/ftp/ftp.go | 4 ++-- backend/hdfs/fs.go | 2 +- backend/koofr/koofr.go | 4 ++-- backend/mailru/api/helpers.go | 6 +++--- backend/mailru/mailru.go | 12 ++++++------ backend/qingstor/qingstor.go | 2 +- backend/qingstor/upload.go | 2 +- backend/seafile/seafile.go | 8 ++++---- backend/seafile/webapi.go | 2 +- backend/sftp/sftp.go | 2 +- backend/swift/swift.go | 2 +- backend/uptobox/uptobox.go | 6 +++--- cmd/cmd.go | 2 +- cmd/config/config.go | 2 +- cmd/cryptdecode/cryptdecode.go | 2 +- cmd/lsf/lsf.go | 2 +- cmd/mount/dir.go | 2 +- cmd/mountlib/check_linux.go | 2 +- cmd/mountlib/check_other.go | 4 ++-- cmd/rc/rc.go | 2 +- cmd/serve/ftp/ftp.go | 20 ++++++++++---------- cmd/serve/restic/restic.go | 2 +- cmd/serve/sftp/connection.go | 2 +- cmd/settier/settier.go | 2 +- cmd/tree/tree.go | 6 +++--- fs/accounting/accounting.go | 2 +- fs/config/configfile/configfile.go | 16 ++++++++-------- fs/config/crypt.go | 6 +++--- fs/cutoffmode.go | 4 ++-- fs/dump.go | 2 +- fs/dump_test.go | 4 ++-- fs/filter/filter.go | 4 ++-- fs/filter/filter_test.go | 4 ++-- fs/hash/hash.go | 2 +- fs/log.go | 4 ++-- fs/open_options.go | 10 +++++----- fs/operations/check.go | 8 ++++---- fs/operations/lsjson.go | 2 +- fs/operations/operations.go | 10 +++++----- fs/operations/operations_test.go | 4 ++-- fs/rc/internal.go | 2 +- fs/rc/rcserver/rcserver_test.go | 2 +- fs/rc/webgui/webgui.go | 2 +- fs/sync/sync.go | 2 +- fstest/mockobject/mockobject.go | 2 +- fstest/test_all/clean.go | 2 +- lib/http/http.go | 12 ++++++------ lib/jwtutil/jwtutil.go | 2 +- lib/rest/url.go | 2 +- vfs/file.go | 2 +- vfs/vfscommon/cachemode.go | 4 ++-- vfs/vfsflags/filemode.go | 2 +- 65 files changed, 159 insertions(+), 159 deletions(-) diff --git a/backend/azureblob/azureblob.go b/backend/azureblob/azureblob.go index 58f4552b9..971bdc1d1 100644 --- a/backend/azureblob/azureblob.go +++ b/backend/azureblob/azureblob.go @@ -539,10 +539,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e err = checkUploadChunkSize(opt.ChunkSize) if err != nil { - return nil, fmt.Errorf("azure: chunk size: %w", err) + return nil, fmt.Errorf("chunk size: %w", err) } if opt.ListChunkSize > maxListChunkSize { - return nil, fmt.Errorf("azure: blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize) + return nil, fmt.Errorf("blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize) } if opt.Endpoint == "" { opt.Endpoint = storageDefaultBaseURL @@ -551,12 +551,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e if opt.AccessTier == "" { opt.AccessTier = string(defaultAccessTier) } else if !validateAccessTier(opt.AccessTier) { - return nil, fmt.Errorf("Azure 
Blob: Supported access tiers are %s, %s and %s", + return nil, fmt.Errorf("supported access tiers are %s, %s and %s", string(azblob.AccessTierHot), string(azblob.AccessTierCool), string(azblob.AccessTierArchive)) } if !validatePublicAccess((opt.PublicAccess)) { - return nil, fmt.Errorf("Azure Blob: Supported public access level are %s and %s", + return nil, fmt.Errorf("supported public access level are %s and %s", string(azblob.PublicAccessBlob), string(azblob.PublicAccessContainer)) } @@ -598,7 +598,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e case opt.UseEmulator: credential, err := azblob.NewSharedKeyCredential(emulatorAccount, emulatorAccountKey) if err != nil { - return nil, fmt.Errorf("Failed to parse credentials: %w", err) + return nil, fmt.Errorf("failed to parse credentials: %w", err) } u, err = url.Parse(emulatorBlobEndpoint) if err != nil { @@ -644,7 +644,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e }) if err != nil { - return nil, fmt.Errorf("Failed to acquire MSI token: %w", err) + return nil, fmt.Errorf("failed to acquire MSI token: %w", err) } u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint)) @@ -679,7 +679,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e case opt.Account != "" && opt.Key != "": credential, err := azblob.NewSharedKeyCredential(opt.Account, opt.Key) if err != nil { - return nil, fmt.Errorf("Failed to parse credentials: %w", err) + return nil, fmt.Errorf("failed to parse credentials: %w", err) } u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint)) @@ -699,7 +699,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e parts := azblob.NewBlobURLParts(*u) if parts.ContainerName != "" { if f.rootContainer != "" && parts.ContainerName != f.rootContainer { - return nil, errors.New("Container name in SAS URL and container provided in command do not match") + return nil, errors.New("container name in SAS URL and container provided in command do not match") } containerURL := azblob.NewContainerURL(*u, pipeline) f.cntURLcache[parts.ContainerName] = &containerURL @@ -727,7 +727,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e pipe := f.newPipeline(azblob.NewTokenCredential("", tokenRefresher), options) serviceURL = azblob.NewServiceURL(*u, pipe) default: - return nil, errors.New("No authentication method configured") + return nil, errors.New("no authentication method configured") } f.svcURL = &serviceURL @@ -1337,7 +1337,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { } data, err := base64.StdEncoding.DecodeString(o.md5) if err != nil { - return "", fmt.Errorf("Failed to decode Content-MD5: %q: %w", o.md5, err) + return "", fmt.Errorf("failed to decode Content-MD5: %q: %w", o.md5, err) } return hex.EncodeToString(data), nil } @@ -1527,7 +1527,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read var offset int64 var count int64 if o.AccessTier() == azblob.AccessTierArchive { - return nil, fmt.Errorf("Blob in archive tier, you need to set tier to hot or cool first") + return nil, fmt.Errorf("blob in archive tier, you need to set tier to hot or cool first") } fs.FixRangeOption(options, o.size) for _, option := range options { @@ -1752,7 +1752,7 @@ func (o *Object) AccessTier() azblob.AccessTierType { // SetTier performs changing object tier func (o *Object) SetTier(tier string) 
error { if !validateAccessTier(tier) { - return fmt.Errorf("Tier %s not supported by Azure Blob Storage", tier) + return fmt.Errorf("tier %s not supported by Azure Blob Storage", tier) } // Check if current tier already matches with desired tier @@ -1768,7 +1768,7 @@ func (o *Object) SetTier(tier string) error { }) if err != nil { - return fmt.Errorf("Failed to set Blob Tier: %w", err) + return fmt.Errorf("failed to set Blob Tier: %w", err) } // Set access tier on local object also, this typically diff --git a/backend/azureblob/imds.go b/backend/azureblob/imds.go index b23e91d62..84c6379e6 100644 --- a/backend/azureblob/imds.go +++ b/backend/azureblob/imds.go @@ -119,7 +119,7 @@ func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) { b, err := ioutil.ReadAll(resp.Body) if err != nil { - return result, fmt.Errorf("Couldn't read IMDS response: %w", err) + return result, fmt.Errorf("couldn't read IMDS response: %w", err) } // Remove BOM, if any. azcopy does this so I'm following along. b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf")) @@ -130,7 +130,7 @@ func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) { // storage API call. err = json.Unmarshal(b, &result) if err != nil { - return result, fmt.Errorf("Couldn't unmarshal IMDS response: %w", err) + return result, fmt.Errorf("couldn't unmarshal IMDS response: %w", err) } return result, nil diff --git a/backend/cache/cache.go b/backend/cache/cache.go index fc038f6ad..1c75aea4a 100644 --- a/backend/cache/cache.go +++ b/backend/cache/cache.go @@ -1128,7 +1128,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) ( case fs.Directory: _ = f.cache.AddDir(DirectoryFromOriginal(ctx, f, o)) default: - return fmt.Errorf("Unknown object type %T", entry) + return fmt.Errorf("unknown object type %T", entry) } } diff --git a/backend/cache/plex.go b/backend/cache/plex.go index e3a333c73..09ea47233 100644 --- a/backend/cache/plex.go +++ b/backend/cache/plex.go @@ -213,7 +213,7 @@ func (p *plexConnector) authenticate() error { var data map[string]interface{} err = json.NewDecoder(resp.Body).Decode(&data) if err != nil { - return fmt.Errorf("failed to obtain token: %v", err) + return fmt.Errorf("failed to obtain token: %w", err) } tokenGen, ok := get(data, "user", "authToken") if !ok { diff --git a/backend/cache/storage_persistent.go b/backend/cache/storage_persistent.go index 0de9a3963..eb36a0806 100644 --- a/backend/cache/storage_persistent.go +++ b/backend/cache/storage_persistent.go @@ -250,7 +250,7 @@ func (b *Persistent) GetDirEntries(cachedDir *Directory) (fs.DirEntries, error) if val != nil { err := json.Unmarshal(val, cachedDir) if err != nil { - return fmt.Errorf("error during unmarshalling obj: %v", err) + return fmt.Errorf("error during unmarshalling obj: %w", err) } } else { return fmt.Errorf("missing cached dir: %v", cachedDir) @@ -551,7 +551,7 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) { err := b.db.Update(func(tx *bolt.Tx) error { dataTsBucket := tx.Bucket([]byte(DataTsBucket)) if dataTsBucket == nil { - return fmt.Errorf("Couldn't open (%v) bucket", DataTsBucket) + return fmt.Errorf("couldn't open (%v) bucket", DataTsBucket) } // iterate through ts c := dataTsBucket.Cursor() @@ -901,16 +901,16 @@ func (b *Persistent) rollbackPendingUpload(remote string) error { v := bucket.Get([]byte(remote)) err = json.Unmarshal(v, tempObj) if err != nil { - return fmt.Errorf("pending upload (%v) not found %v", remote, err) + return fmt.Errorf("pending upload (%v) not 
found: %w", remote, err) } tempObj.Started = false v2, err := json.Marshal(tempObj) if err != nil { - return fmt.Errorf("pending upload not updated %v", err) + return fmt.Errorf("pending upload not updated: %w", err) } err = bucket.Put([]byte(tempObj.DestPath), v2) if err != nil { - return fmt.Errorf("pending upload not updated %v", err) + return fmt.Errorf("pending upload not updated: %w", err) } return nil }) @@ -966,11 +966,11 @@ func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUpload } v2, err := json.Marshal(tempObj) if err != nil { - return fmt.Errorf("pending upload not updated %v", err) + return fmt.Errorf("pending upload not updated: %w", err) } err = bucket.Put([]byte(tempObj.DestPath), v2) if err != nil { - return fmt.Errorf("pending upload not updated %v", err) + return fmt.Errorf("pending upload not updated: %w", err) } return nil diff --git a/backend/compress/compress.go b/backend/compress/compress.go index 6d3397dfb..dcc54a11c 100644 --- a/backend/compress/compress.go +++ b/backend/compress/compress.go @@ -222,7 +222,7 @@ func processFileName(compressedFileName string) (origFileName string, extension // Separate the filename and size from the extension extensionPos := strings.LastIndex(compressedFileName, ".") if extensionPos == -1 { - return "", "", 0, errors.New("File name has no extension") + return "", "", 0, errors.New("file name has no extension") } extension = compressedFileName[extensionPos:] nameWithSize := compressedFileName[:extensionPos] @@ -231,11 +231,11 @@ func processFileName(compressedFileName string) (origFileName string, extension } match := nameRegexp.FindStringSubmatch(nameWithSize) if match == nil || len(match) != 3 { - return "", "", 0, errors.New("Invalid filename") + return "", "", 0, errors.New("invalid filename") } size, err := base64ToInt64(match[2]) if err != nil { - return "", "", 0, errors.New("Could not decode size") + return "", "", 0, errors.New("could not decode size") } return match[1], gzFileExt, size, nil } @@ -304,7 +304,7 @@ func (f *Fs) processEntries(entries fs.DirEntries) (newEntries fs.DirEntries, er case fs.Directory: f.addDir(&newEntries, x) default: - return nil, fmt.Errorf("Unknown object type %T", entry) + return nil, fmt.Errorf("unknown object type %T", entry) } } return newEntries, nil @@ -466,10 +466,10 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod _ = os.Remove(tempFile.Name()) }() if err != nil { - return nil, fmt.Errorf("Failed to create temporary local FS to spool file: %w", err) + return nil, fmt.Errorf("failed to create temporary local FS to spool file: %w", err) } if _, err = io.Copy(tempFile, in); err != nil { - return nil, fmt.Errorf("Failed to write temporary local file: %w", err) + return nil, fmt.Errorf("failed to write temporary local file: %w", err) } if _, err = tempFile.Seek(0, 0); err != nil { return nil, err @@ -720,7 +720,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt if found && (oldObj.(*Object).meta.Mode != Uncompressed || compressible) { err = oldObj.(*Object).Object.Remove(ctx) if err != nil { - return nil, fmt.Errorf("Could remove original object: %w", err) + return nil, fmt.Errorf("couldn't remove original object: %w", err) } } @@ -729,7 +729,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt if compressible { wrapObj, err := operations.Move(ctx, f.Fs, nil, f.dataName(src.Remote(), newObj.size, compressible), newObj.Object) if err != nil { - return nil, 
fmt.Errorf("Couldn't rename streamed Object.: %w", err) + return nil, fmt.Errorf("couldn't rename streamed object: %w", err) } newObj.Object = wrapObj } diff --git a/backend/crypt/cipher.go b/backend/crypt/cipher.go index 4a3556295..1b265ad19 100644 --- a/backend/crypt/cipher.go +++ b/backend/crypt/cipher.go @@ -96,7 +96,7 @@ func NewNameEncryptionMode(s string) (mode NameEncryptionMode, err error) { case "obfuscate": mode = NameEncryptionObfuscated default: - err = fmt.Errorf("Unknown file name encryption mode %q", s) + err = fmt.Errorf("unknown file name encryption mode %q", s) } return mode, err } @@ -162,7 +162,7 @@ func NewNameEncoding(s string) (enc fileNameEncoding, err error) { case "base32768": enc = base32768.SafeEncoding default: - err = fmt.Errorf("Unknown file name encoding mode %q", s) + err = fmt.Errorf("unknown file name encoding mode %q", s) } return enc, err } diff --git a/backend/crypt/crypt.go b/backend/crypt/crypt.go index 11dd55dad..33e57cdb9 100644 --- a/backend/crypt/crypt.go +++ b/backend/crypt/crypt.go @@ -328,7 +328,7 @@ func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntr case fs.Directory: f.addDir(ctx, &newEntries, x) default: - return nil, fmt.Errorf("Unknown object type %T", entry) + return nil, fmt.Errorf("unknown object type %T", entry) } } return newEntries, nil diff --git a/backend/crypt/pkcs7/pkcs7.go b/backend/crypt/pkcs7/pkcs7.go index db604ae4b..d2a547042 100644 --- a/backend/crypt/pkcs7/pkcs7.go +++ b/backend/crypt/pkcs7/pkcs7.go @@ -8,11 +8,11 @@ import "errors" // Errors Unpad can return var ( - ErrorPaddingNotFound = errors.New("Bad PKCS#7 padding - not padded") - ErrorPaddingNotAMultiple = errors.New("Bad PKCS#7 padding - not a multiple of blocksize") - ErrorPaddingTooLong = errors.New("Bad PKCS#7 padding - too long") - ErrorPaddingTooShort = errors.New("Bad PKCS#7 padding - too short") - ErrorPaddingNotAllTheSame = errors.New("Bad PKCS#7 padding - not all the same") + ErrorPaddingNotFound = errors.New("bad PKCS#7 padding - not padded") + ErrorPaddingNotAMultiple = errors.New("bad PKCS#7 padding - not a multiple of blocksize") + ErrorPaddingTooLong = errors.New("bad PKCS#7 padding - too long") + ErrorPaddingTooShort = errors.New("bad PKCS#7 padding - too short") + ErrorPaddingNotAllTheSame = errors.New("bad PKCS#7 padding - not all the same") ) // Pad buf using PKCS#7 to a multiple of n. diff --git a/backend/drive/drive.go b/backend/drive/drive.go index 9eac37a0e..918faddf9 100644 --- a/backend/drive/drive.go +++ b/backend/drive/drive.go @@ -2218,10 +2218,10 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, exportExt, _, _ = f.findExportFormatByMimeType(ctx, importMimeType) if exportExt == "" { - return nil, fmt.Errorf("No export format found for %q", importMimeType) + return nil, fmt.Errorf("no export format found for %q", importMimeType) } if exportExt != srcExt && !f.opt.AllowImportNameChange { - return nil, fmt.Errorf("Can't convert %q to a document with a different export filetype (%q)", srcExt, exportExt) + return nil, fmt.Errorf("can't convert %q to a document with a different export filetype (%q)", srcExt, exportExt) } } } @@ -2526,7 +2526,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, // result of List() func (f *Fs) Purge(ctx context.Context, dir string) error { if f.opt.TrashedOnly { - return errors.New("Can't purge with --drive-trashed-only. 
Use delete if you want to selectively delete files") + return errors.New("can't purge with --drive-trashed-only, use delete if you want to selectively delete files") } return f.purgeCheck(ctx, dir, false) } @@ -3715,7 +3715,7 @@ func (o *baseObject) open(ctx context.Context, url string, options ...fs.OpenOpt url += "acknowledgeAbuse=true" _, res, err = o.httpResponse(ctx, url, "GET", options) } else { - err = fmt.Errorf("Use the --drive-acknowledge-abuse flag to download this file: %w", err) + err = fmt.Errorf("use the --drive-acknowledge-abuse flag to download this file: %w", err) } } if err != nil { diff --git a/backend/dropbox/dropbox.go b/backend/dropbox/dropbox.go index 69d0b4e7d..bcee49821 100644 --- a/backend/dropbox/dropbox.go +++ b/backend/dropbox/dropbox.go @@ -1199,7 +1199,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, return } if len(listRes.Links) == 0 { - err = errors.New("Dropbox says the sharing link already exists, but list came back empty") + err = errors.New("sharing link already exists, but list came back empty") return } linkRes = listRes.Links[0] @@ -1211,7 +1211,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, case *sharing.FolderLinkMetadata: link = res.Url default: - err = fmt.Errorf("Don't know how to extract link, response has unknown format: %T", res) + err = fmt.Errorf("don't know how to extract link, response has unknown format: %T", res) } } return diff --git a/backend/fichier/api.go b/backend/fichier/api.go index 15c916c73..1ee812c47 100644 --- a/backend/fichier/api.go +++ b/backend/fichier/api.go @@ -487,7 +487,7 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName, fileName = f.opt.Enc.FromStandardName(fileName) if len(uploadID) > 10 || !isAlphaNumeric(uploadID) { - return nil, errors.New("Invalid UploadID") + return nil, errors.New("invalid UploadID") } opts := rest.Opts{ @@ -529,7 +529,7 @@ func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (re // fs.Debugf(f, "Ending File Upload `%s`", uploadID) if len(uploadID) > 10 || !isAlphaNumeric(uploadID) { - return nil, errors.New("Invalid UploadID") + return nil, errors.New("invalid UploadID") } opts := rest.Opts{ diff --git a/backend/fichier/fichier.go b/backend/fichier/fichier.go index 8548d00c2..454e391ff 100644 --- a/backend/fichier/fichier.go +++ b/backend/fichier/fichier.go @@ -294,7 +294,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { path, ok := f.dirCache.GetInv(directoryID) if !ok { - return nil, errors.New("Cannot find dir in dircache") + return nil, errors.New("cannot find dir in dircache") } return f.newObjectFromFile(ctx, path, file), nil diff --git a/backend/ftp/ftp.go b/backend/ftp/ftp.go index c078f78b0..d92678f08 100644 --- a/backend/ftp/ftp.go +++ b/backend/ftp/ftp.go @@ -487,7 +487,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs protocol = "ftps://" } if opt.TLS && opt.ExplicitTLS { - return nil, errors.New("Implicit TLS and explicit TLS are mutually incompatible. 
Please revise your config") + return nil, errors.New("implicit TLS and explicit TLS are mutually incompatible, please revise your config") } var tlsConfig *tls.Config if opt.TLS || opt.ExplicitTLS { @@ -718,7 +718,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e case <-timer.C: // if timer fired assume no error but connection dead fs.Errorf(f, "Timeout when waiting for List") - return nil, errors.New("Timeout when waiting for List") + return nil, errors.New("timeout when waiting for List") } // Annoyingly FTP returns success for a directory which diff --git a/backend/hdfs/fs.go b/backend/hdfs/fs.go index 8de9ddbac..a68372db6 100644 --- a/backend/hdfs/fs.go +++ b/backend/hdfs/fs.go @@ -92,7 +92,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e if opt.ServicePrincipalName != "" { options.KerberosClient, err = getKerberosClient() if err != nil { - return nil, fmt.Errorf("Problem with kerberos authentication: %s", err) + return nil, fmt.Errorf("problem with kerberos authentication: %w", err) } options.KerberosServicePrincipleName = opt.ServicePrincipalName diff --git a/backend/koofr/koofr.go b/backend/koofr/koofr.go index f77ae019f..f893b898b 100644 --- a/backend/koofr/koofr.go +++ b/backend/koofr/koofr.go @@ -351,9 +351,9 @@ func NewFsFromOptions(ctx context.Context, name, root string, opt *Options) (ff } if f.mountID == "" { if opt.MountID == "" { - return nil, errors.New("Failed to find primary mount") + return nil, errors.New("failed to find primary mount") } - return nil, errors.New("Failed to find mount " + opt.MountID) + return nil, errors.New("failed to find mount " + opt.MountID) } rootFile, err := f.client.FilesInfo(f.mountID, f.opt.Enc.FromStandardPath("/"+f.root)) if err == nil && rootFile.Type != "dir" { diff --git a/backend/mailru/api/helpers.go b/backend/mailru/api/helpers.go index 1b43eb182..d7164185f 100644 --- a/backend/mailru/api/helpers.go +++ b/backend/mailru/api/helpers.go @@ -16,9 +16,9 @@ import ( // protocol errors var ( - ErrorPrematureEOF = errors.New("Premature EOF") - ErrorInvalidLength = errors.New("Invalid length") - ErrorZeroTerminate = errors.New("String must end with zero") + ErrorPrematureEOF = errors.New("premature EOF") + ErrorInvalidLength = errors.New("invalid length") + ErrorZeroTerminate = errors.New("string must end with zero") ) // BinWriter is a binary protocol writer diff --git a/backend/mailru/mailru.go b/backend/mailru/mailru.go index 24693c67c..a9a9bab2d 100644 --- a/backend/mailru/mailru.go +++ b/backend/mailru/mailru.go @@ -435,10 +435,10 @@ func (f *Fs) authorize(ctx context.Context, force bool) (err error) { t, err = oauthConfig.PasswordCredentialsToken(ctx, f.opt.Username, f.opt.Password) } if err == nil && !tokenIsValid(t) { - err = errors.New("Invalid token") + err = errors.New("invalid token") } if err != nil { - return fmt.Errorf("Failed to authorize: %w", err) + return fmt.Errorf("failed to authorize: %w", err) } if err = oauthutil.PutToken(f.name, f.m, t, false); err != nil { @@ -580,7 +580,7 @@ func readBodyWord(res *http.Response) (word string, err error) { word = strings.Split(line, " ")[0] } if word == "" { - return "", errors.New("Empty reply from dispatcher") + return "", errors.New("empty reply from dispatcher") } return word, nil } @@ -1684,7 +1684,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op spoolFile, mrHash, err := makeTempFile(ctx, tmpFs, wrapIn, src) if err != nil { - return fmt.Errorf("Failed to create spool 
file: %w", err) + return fmt.Errorf("failed to create spool file: %w", err) } if o.putByHash(ctx, mrHash, src, "spool") { // If put by hash is successful, ignore transitive error @@ -1966,7 +1966,7 @@ func (o *Object) readMetaData(ctx context.Context, force bool) error { return fs.ErrorIsDir } if newObj.remote != o.remote { - return fmt.Errorf("File %q path has changed to %q", o.remote, newObj.remote) + return fmt.Errorf("file %q path has changed to %q", o.remote, newObj.remote) } o.hasMetaData = true o.size = newObj.size @@ -2318,7 +2318,7 @@ func (p *serverPool) Dispatch(ctx context.Context, current string) (string, erro }) if err != nil || url == "" { closeBody(res) - return "", fmt.Errorf("Failed to request file server: %w", err) + return "", fmt.Errorf("failed to request file server: %w", err) } p.addServer(url, now) diff --git a/backend/qingstor/qingstor.go b/backend/qingstor/qingstor.go index 5f5134099..9e9b0c6cc 100644 --- a/backend/qingstor/qingstor.go +++ b/backend/qingstor/qingstor.go @@ -253,7 +253,7 @@ func qsServiceConnection(ctx context.Context, opt *Options) (*qs.Service, error) _protocol, _host, _port, err := qsParseEndpoint(endpoint) if err != nil { - return nil, fmt.Errorf("The endpoint \"%s\" format error", endpoint) + return nil, fmt.Errorf("the endpoint \"%s\" format error", endpoint) } if _protocol != "" { diff --git a/backend/qingstor/upload.go b/backend/qingstor/upload.go index d9a818019..797e718f8 100644 --- a/backend/qingstor/upload.go +++ b/backend/qingstor/upload.go @@ -184,7 +184,7 @@ func (u *uploader) upload() error { fs.Debugf(u, "Uploading as single part object to QingStor") return u.singlePartUpload(reader, u.readerPos) } else if err != nil { - return fmt.Errorf("read upload data failed: %s", err) + return fmt.Errorf("read upload data failed: %w", err) } fs.Debugf(u, "Uploading as multi-part object to QingStor") diff --git a/backend/seafile/seafile.go b/backend/seafile/seafile.go index 15a708641..ffae61a5a 100644 --- a/backend/seafile/seafile.go +++ b/backend/seafile/seafile.go @@ -886,7 +886,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string // 1- rename source err = srcFs.renameDir(ctx, srcLibraryID, srcPath, tempName) if err != nil { - return fmt.Errorf("Cannot rename source directory to a temporary name: %w", err) + return fmt.Errorf("cannot rename source directory to a temporary name: %w", err) } // 2- move source to destination @@ -900,7 +900,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string // 3- rename destination back to source name err = f.renameDir(ctx, dstLibraryID, path.Join(dstDir, tempName), dstName) if err != nil { - return fmt.Errorf("Cannot rename temporary directory to destination name: %w", err) + return fmt.Errorf("cannot rename temporary directory to destination name: %w", err) } return nil @@ -923,7 +923,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error { // CleanUp the trash in the Fs func (f *Fs) CleanUp(ctx context.Context) error { if f.libraryName == "" { - return errors.New("Cannot clean up at the root of the seafile server: please select a library to clean up") + return errors.New("cannot clean up at the root of the seafile server, please select a library to clean up") } libraryID, err := f.getLibraryID(ctx, f.libraryName) if err != nil { @@ -972,7 +972,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, libraryName, filePath := f.splitPath(remote) if libraryName == "" { // We cannot share the whole seafile 
server, we need at least a library - return "", errors.New("Cannot share the root of the seafile server. Please select a library to share") + return "", errors.New("cannot share the root of the seafile server, please select a library to share") } libraryID, err := f.getLibraryID(ctx, libraryName) if err != nil { diff --git a/backend/seafile/webapi.go b/backend/seafile/webapi.go index 29d2f8645..c1eb33da0 100644 --- a/backend/seafile/webapi.go +++ b/backend/seafile/webapi.go @@ -26,7 +26,7 @@ const ( // Errors specific to seafile fs var ( - ErrorInternalDuringUpload = errors.New("Internal server error during file upload") + ErrorInternalDuringUpload = errors.New("internal server error during file upload") ) // ==================== Seafile API ==================== diff --git a/backend/sftp/sftp.go b/backend/sftp/sftp.go index 74af5fa41..bb5add94d 100644 --- a/backend/sftp/sftp.go +++ b/backend/sftp/sftp.go @@ -515,7 +515,7 @@ func (f *Fs) setEnv(s *ssh.Session) error { // fs.Debugf(f, "Setting env %q = %q", env[:equal], env[equal+1:]) err := s.Setenv(env[:equal], env[equal+1:]) if err != nil { - return fmt.Errorf("Failed to set env var %q: %w", env[:equal], err) + return fmt.Errorf("failed to set env var %q: %w", env[:equal], err) } } return nil diff --git a/backend/swift/swift.go b/backend/swift/swift.go index 074e2daf7..e4bae345e 100644 --- a/backend/swift/swift.go +++ b/backend/swift/swift.go @@ -1303,7 +1303,7 @@ func (o *Object) getSegmentsDlo(ctx context.Context) (segmentsContainer string, } delimiter := strings.Index(dirManifest, "/") if len(dirManifest) == 0 || delimiter < 0 { - err = errors.New("Missing or wrong structure of manifest of Dynamic large object") + err = errors.New("missing or wrong structure of manifest of Dynamic large object") return } return dirManifest[:delimiter], dirManifest[delimiter+1:], nil diff --git a/backend/uptobox/uptobox.go b/backend/uptobox/uptobox.go index 56c4f80a1..e232ac83c 100644 --- a/backend/uptobox/uptobox.go +++ b/backend/uptobox/uptobox.go @@ -474,7 +474,7 @@ func (f *Fs) updateFileInformation(ctx context.Context, update *api.UpdateFileIn func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) { if size > int64(200e9) { // max size 200GB - return nil, errors.New("File too big, cant upload") + return nil, errors.New("file too big, can't upload") } else if size == 0 { return nil, fs.ErrorCantUploadEmptyFiles } @@ -497,7 +497,7 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size return nil, err } if info.StatusCode != 0 { - return nil, fmt.Errorf("putUnchecked: api error: %d - %s", info.StatusCode, info.Message) + return nil, fmt.Errorf("putUnchecked api error: %d - %s", info.StatusCode, info.Message) } // we need to have a safe name for the upload to work tmpName := "rcloneTemp" + random.String(8) @@ -506,7 +506,7 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size return nil, err } if len(upload.Files) != 1 { - return nil, errors.New("Upload: unexpected response") + return nil, errors.New("upload unexpected response") } match := f.IDRegexp.FindStringSubmatch(upload.Files[0].URL) diff --git a/cmd/cmd.go b/cmd/cmd.go index 82ae1ad83..5dd120be7 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -117,7 +117,7 @@ func newFsFileAddFilter(remote string) (fs.Fs, string) { f, fileName := NewFsFile(remote) if fileName != "" { if !fi.InActive() { - err := fmt.Errorf("Can't limit to single files when using filters: %v", 
remote) + err := fmt.Errorf("can't limit to single files when using filters: %v", remote) err = fs.CountError(err) log.Fatalf(err.Error()) } diff --git a/cmd/config/config.go b/cmd/config/config.go index 96734e552..ed5700327 100644 --- a/cmd/config/config.go +++ b/cmd/config/config.go @@ -402,7 +402,7 @@ To reconnect use "rclone config reconnect". } err := doDisconnect(context.Background()) if err != nil { - return fmt.Errorf("Disconnect call failed: %w", err) + return fmt.Errorf("disconnect call failed: %w", err) } return nil }, diff --git a/cmd/cryptdecode/cryptdecode.go b/cmd/cryptdecode/cryptdecode.go index 2f938cbfa..d36c70ca4 100644 --- a/cmd/cryptdecode/cryptdecode.go +++ b/cmd/cryptdecode/cryptdecode.go @@ -48,7 +48,7 @@ See the documentation on the [crypt](/crypt/) overlay for more info. return err } if fsInfo.Name != "crypt" { - return errors.New("The remote needs to be of type \"crypt\"") + return errors.New("the remote needs to be of type \"crypt\"") } cipher, err := crypt.NewCipher(config) if err != nil { diff --git a/cmd/lsf/lsf.go b/cmd/lsf/lsf.go index 812edf8ea..732d2b5e8 100644 --- a/cmd/lsf/lsf.go +++ b/cmd/lsf/lsf.go @@ -199,7 +199,7 @@ func Lsf(ctx context.Context, fsrc fs.Fs, out io.Writer) error { case 'T': list.AddTier() default: - return fmt.Errorf("Unknown format character %q", char) + return fmt.Errorf("unknown format character %q", char) } } diff --git a/cmd/mount/dir.go b/cmd/mount/dir.go index 47ec4bf2d..b82b52179 100644 --- a/cmd/mount/dir.go +++ b/cmd/mount/dir.go @@ -199,7 +199,7 @@ func (d *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDir fusefs defer log.Trace(d, "oldName=%q, newName=%q, newDir=%+v", req.OldName, req.NewName, newDir)("err=%v", &err) destDir, ok := newDir.(*Dir) if !ok { - return fmt.Errorf("Unknown Dir type %T", newDir) + return fmt.Errorf("unknown Dir type %T", newDir) } err = d.Dir.Rename(req.OldName, req.NewName, destDir.Dir) diff --git a/cmd/mountlib/check_linux.go b/cmd/mountlib/check_linux.go index 3cdb34690..1f241af87 100644 --- a/cmd/mountlib/check_linux.go +++ b/cmd/mountlib/check_linux.go @@ -22,7 +22,7 @@ const ( // On Linux we use the OS-specific /proc/mount API so the check won't access the path. // Directories marked as "mounted" by autofs are considered not mounted. 
func CheckMountEmpty(mountpoint string) error { - const msg = "Directory already mounted, use --allow-non-empty to mount anyway: %s" + const msg = "directory already mounted, use --allow-non-empty to mount anyway: %s" mountpointAbs, err := filepath.Abs(mountpoint) if err != nil { diff --git a/cmd/mountlib/check_other.go b/cmd/mountlib/check_other.go index 0b7eeede7..c8876632c 100644 --- a/cmd/mountlib/check_other.go +++ b/cmd/mountlib/check_other.go @@ -17,7 +17,7 @@ import ( func CheckMountEmpty(mountpoint string) error { fp, err := os.Open(mountpoint) if err != nil { - return fmt.Errorf("Can not open: %s: %w", mountpoint, err) + return fmt.Errorf("cannot open: %s: %w", mountpoint, err) } defer fs.CheckClose(fp, &err) @@ -26,7 +26,7 @@ func CheckMountEmpty(mountpoint string) error { return nil } - const msg = "Directory is not empty, use --allow-non-empty to mount anyway: %s" + const msg = "directory is not empty, use --allow-non-empty to mount anyway: %s" if err == nil { return fmt.Errorf(msg, mountpoint) } diff --git a/cmd/rc/rc.go b/cmd/rc/rc.go index 6b123f97e..53e8697d8 100644 --- a/cmd/rc/rc.go +++ b/cmd/rc/rc.go @@ -211,7 +211,7 @@ func doCall(ctx context.Context, path string, in rc.Params) (out rc.Params, err bodyString = err.Error() } bodyString = strings.TrimSpace(bodyString) - return nil, fmt.Errorf("Failed to read rc response: %s: %s", resp.Status, bodyString) + return nil, fmt.Errorf("failed to read rc response: %s: %s", resp.Status, bodyString) } // Parse output diff --git a/cmd/serve/ftp/ftp.go b/cmd/serve/ftp/ftp.go index f8ba81725..05f7b0924 100644 --- a/cmd/serve/ftp/ftp.go +++ b/cmd/serve/ftp/ftp.go @@ -135,11 +135,11 @@ var passivePortsRe = regexp.MustCompile(`^\s*\d+\s*-\s*\d+\s*$`) func newServer(ctx context.Context, f fs.Fs, opt *Options) (*server, error) { host, port, err := net.SplitHostPort(opt.ListenAddr) if err != nil { - return nil, errors.New("Failed to parse host:port") + return nil, errors.New("failed to parse host:port") } portNum, err := strconv.Atoi(port) if err != nil { - return nil, errors.New("Failed to parse host:port") + return nil, errors.New("failed to parse host:port") } s := &server{ @@ -284,7 +284,7 @@ func (d *Driver) ChangeDir(path string) (err error) { return err } if !n.IsDir() { - return errors.New("Not a directory") + return errors.New("not a directory") } return nil } @@ -296,12 +296,12 @@ func (d *Driver) ListDir(path string, callback func(ftp.FileInfo) error) (err er defer log.Trace(path, "")("err = %v", &err) node, err := d.vfs.Stat(path) if err == vfs.ENOENT { - return errors.New("Directory not found") + return errors.New("directory not found") } else if err != nil { return err } if !node.IsDir() { - return errors.New("Not a directory") + return errors.New("not a directory") } dir := node.(*vfs.Dir) @@ -335,7 +335,7 @@ func (d *Driver) DeleteDir(path string) (err error) { return err } if !node.IsDir() { - return errors.New("Not a directory") + return errors.New("not a directory") } err = node.Remove() if err != nil { @@ -354,7 +354,7 @@ func (d *Driver) DeleteFile(path string) (err error) { return err } if !node.IsFile() { - return errors.New("Not a file") + return errors.New("not a file") } err = node.Remove() if err != nil { @@ -392,12 +392,12 @@ func (d *Driver) GetFile(path string, offset int64) (size int64, fr io.ReadClose node, err := d.vfs.Stat(path) if err == vfs.ENOENT { fs.Infof(path, "File not found") - return 0, nil, errors.New("File not found") + return 0, nil, errors.New("file not found") } else if err != nil { return 
0, nil, err } if !node.IsFile() { - return 0, nil, errors.New("Not a file") + return 0, nil, errors.New("not a file") } handle, err := node.Open(os.O_RDONLY) @@ -426,7 +426,7 @@ func (d *Driver) PutFile(path string, data io.Reader, appendData bool) (n int64, if err == nil { isExist = true if node.IsDir() { - return 0, errors.New("A dir has the same name") + return 0, errors.New("a dir has the same name") } } else { if os.IsNotExist(err) { diff --git a/cmd/serve/restic/restic.go b/cmd/serve/restic/restic.go index a8eb7b483..7139b084b 100644 --- a/cmd/serve/restic/restic.go +++ b/cmd/serve/restic/restic.go @@ -139,7 +139,7 @@ with a path of ` + "`//`" + `. s := NewServer(f, &httpflags.Opt) if stdio { if terminal.IsTerminal(int(os.Stdout.Fd())) { - return errors.New("Refusing to run HTTP2 server directly on a terminal, please let restic start rclone") + return errors.New("refusing to run HTTP2 server directly on a terminal, please let restic start rclone") } conn := &StdioConn{ diff --git a/cmd/serve/sftp/connection.go b/cmd/serve/sftp/connection.go index 693b5fbb0..d8be2bce5 100644 --- a/cmd/serve/sftp/connection.go +++ b/cmd/serve/sftp/connection.go @@ -74,7 +74,7 @@ func (c *conn) execCommand(ctx context.Context, out io.Writer, command string) ( } usage, err := about(ctx) if err != nil { - return fmt.Errorf("About failed: %w", err) + return fmt.Errorf("about failed: %w", err) } total, used, free := int64(-1), int64(-1), int64(-1) if usage.Total != nil { diff --git a/cmd/settier/settier.go b/cmd/settier/settier.go index 97953727a..6be1c1c6a 100644 --- a/cmd/settier/settier.go +++ b/cmd/settier/settier.go @@ -47,7 +47,7 @@ Or just provide remote directory and all files in directory will be tiered cmd.Run(false, false, command, func() error { isSupported := fsrc.Features().SetTier if !isSupported { - return fmt.Errorf("Remote %s does not support settier", fsrc.Name()) + return fmt.Errorf("remote %s does not support settier", fsrc.Name()) } return operations.SetTier(context.Background(), fsrc, tier) diff --git a/cmd/tree/tree.go b/cmd/tree/tree.go index 4bd899462..2528d9dc2 100644 --- a/cmd/tree/tree.go +++ b/cmd/tree/tree.go @@ -102,7 +102,7 @@ For a more interactive navigation of the remote see the var err error outFile, err = os.Create(outFileName) if err != nil { - return fmt.Errorf("failed to create output file: %v", err) + return fmt.Errorf("failed to create output file: %w", err) } } opts.VerSort = opts.VerSort || sort == "version" @@ -209,7 +209,7 @@ func (dirs Fs) Stat(filePath string) (fi os.FileInfo, err error) { } _, entry := dirtree.DirTree(dirs).Find(filePath) if entry == nil { - return nil, fmt.Errorf("Couldn't find %q in directory cache", filePath) + return nil, fmt.Errorf("couldn't find %q in directory cache", filePath) } return &FileInfo{entry}, nil } @@ -221,7 +221,7 @@ func (dirs Fs) ReadDir(dir string) (names []string, err error) { dir = strings.TrimLeft(dir, "/") entries, ok := dirs[dir] if !ok { - return nil, fmt.Errorf("Couldn't find directory %q", dir) + return nil, fmt.Errorf("couldn't find directory %q", dir) } for _, entry := range entries { names = append(names, path.Base(entry.Remote())) diff --git a/fs/accounting/accounting.go b/fs/accounting/accounting.go index 53d6549fb..a019232fb 100644 --- a/fs/accounting/accounting.go +++ b/fs/accounting/accounting.go @@ -19,7 +19,7 @@ import ( // ErrorMaxTransferLimitReached defines error when transfer limit is reached. // Used for checking on exit and matching to correct exit code. 
-var ErrorMaxTransferLimitReached = errors.New("Max transfer limit reached as set by --max-transfer") +var ErrorMaxTransferLimitReached = errors.New("max transfer limit reached as set by --max-transfer") // ErrorMaxTransferLimitReachedFatal is returned from Read when the max // transfer limit is reached. diff --git a/fs/config/configfile/configfile.go b/fs/config/configfile/configfile.go index df01f25a5..e9c498995 100644 --- a/fs/config/configfile/configfile.go +++ b/fs/config/configfile/configfile.go @@ -106,7 +106,7 @@ func (s *Storage) Save() error { configPath := config.GetConfigPath() if configPath == "" { - return fmt.Errorf("Failed to save config file: Path is empty") + return fmt.Errorf("failed to save config file, path is empty") } dir, name := filepath.Split(configPath) @@ -116,18 +116,18 @@ func (s *Storage) Save() error { } f, err := ioutil.TempFile(dir, name) if err != nil { - return fmt.Errorf("Failed to create temp file for new config: %v", err) + return fmt.Errorf("failed to create temp file for new config: %w", err) } defer func() { _ = f.Close() if err := os.Remove(f.Name()); err != nil && !os.IsNotExist(err) { - fs.Errorf(nil, "Failed to remove temp config file: %v", err) + fs.Errorf(nil, "failed to remove temp config file: %v", err) } }() var buf bytes.Buffer if err := goconfig.SaveConfigData(s.gc, &buf); err != nil { - return fmt.Errorf("Failed to save config file: %v", err) + return fmt.Errorf("failed to save config file: %w", err) } if err := config.Encrypt(&buf, f); err != nil { @@ -137,7 +137,7 @@ func (s *Storage) Save() error { _ = f.Sync() err = f.Close() if err != nil { - return fmt.Errorf("Failed to close config file: %v", err) + return fmt.Errorf("failed to close config file: %w", err) } var fileMode os.FileMode = 0600 @@ -157,10 +157,10 @@ func (s *Storage) Save() error { } if err = os.Rename(configPath, configPath+".old"); err != nil && !os.IsNotExist(err) { - return fmt.Errorf("Failed to move previous config to backup location: %v", err) + return fmt.Errorf("failed to move previous config to backup location: %w", err) } if err = os.Rename(f.Name(), configPath); err != nil { - return fmt.Errorf("Failed to move newly written config from %s to final location: %v", f.Name(), err) + return fmt.Errorf("failed to move newly written config from %s to final location: %v", f.Name(), err) } if err := os.Remove(configPath + ".old"); err != nil && !os.IsNotExist(err) { fs.Errorf(nil, "Failed to remove backup config file: %v", err) @@ -177,7 +177,7 @@ func (s *Storage) Serialize() (string, error) { s.check() var buf bytes.Buffer if err := goconfig.SaveConfigData(s.gc, &buf); err != nil { - return "", fmt.Errorf("Failed to save config file: %v", err) + return "", fmt.Errorf("failed to save config file: %w", err) } return buf.String(), nil diff --git a/fs/config/crypt.go b/fs/config/crypt.go index 002e6c49f..6792811dc 100644 --- a/fs/config/crypt.go +++ b/fs/config/crypt.go @@ -133,7 +133,7 @@ func Decrypt(b io.ReadSeeker) (io.Reader, error) { return nil, fmt.Errorf("failed to load base64 encoded data: %w", err) } if len(box) < 24+secretbox.Overhead { - return nil, errors.New("Configuration data too short") + return nil, errors.New("configuration data too short") } var out []byte @@ -206,7 +206,7 @@ func Encrypt(src io.Reader, dst io.Writer) error { enc := base64.NewEncoder(base64.StdEncoding, dst) _, err := enc.Write(nonce[:]) if err != nil { - return fmt.Errorf("Failed to write config file: %v", err) + return fmt.Errorf("failed to write config file: %w", err) } var key 
[32]byte @@ -219,7 +219,7 @@ func Encrypt(src io.Reader, dst io.Writer) error { b := secretbox.Seal(nil, data, &nonce, &key) _, err = enc.Write(b) if err != nil { - return fmt.Errorf("Failed to write config file: %v", err) + return fmt.Errorf("failed to write config file: %w", err) } return enc.Close() } diff --git a/fs/cutoffmode.go b/fs/cutoffmode.go index a58c5c452..9f6390663 100644 --- a/fs/cutoffmode.go +++ b/fs/cutoffmode.go @@ -38,7 +38,7 @@ func (m *CutoffMode) Set(s string) error { return nil } } - return fmt.Errorf("Unknown cutoff mode %q", s) + return fmt.Errorf("unknown cutoff mode %q", s) } // Type of the value @@ -50,7 +50,7 @@ func (m *CutoffMode) Type() string { func (m *CutoffMode) UnmarshalJSON(in []byte) error { return UnmarshalJSONFlag(in, m, func(i int64) error { if i < 0 || i >= int64(len(cutoffModeToString)) { - return fmt.Errorf("Out of range cutoff mode %d", i) + return fmt.Errorf("out of range cutoff mode %d", i) } *m = (CutoffMode)(i) return nil diff --git a/fs/dump.go b/fs/dump.go index f0c72bcab..a15671253 100644 --- a/fs/dump.go +++ b/fs/dump.go @@ -78,7 +78,7 @@ func (f *DumpFlags) Set(s string) error { } } if !found { - return fmt.Errorf("Unknown dump flag %q", part) + return fmt.Errorf("unknown dump flag %q", part) } } *f = flags diff --git a/fs/dump_test.go b/fs/dump_test.go index c0ea248dd..ede6125e8 100644 --- a/fs/dump_test.go +++ b/fs/dump_test.go @@ -30,7 +30,7 @@ func TestDumpFlagsSet(t *testing.T) { {"bodies,headers,auth", DumpBodies | DumpHeaders | DumpAuth, ""}, {"bodies,headers,auth", DumpBodies | DumpHeaders | DumpAuth, ""}, {"headers,bodies,requests,responses,auth,filters", DumpHeaders | DumpBodies | DumpRequests | DumpResponses | DumpAuth | DumpFilters, ""}, - {"headers,bodies,unknown,auth", 0, "Unknown dump flag \"unknown\""}, + {"headers,bodies,unknown,auth", 0, "unknown dump flag \"unknown\""}, } { f := DumpFlags(-1) initial := f @@ -69,7 +69,7 @@ func TestDumpFlagsUnmarshallJSON(t *testing.T) { {`"bodies,headers,auth"`, DumpBodies | DumpHeaders | DumpAuth, ""}, {`"bodies,headers,auth"`, DumpBodies | DumpHeaders | DumpAuth, ""}, {`"headers,bodies,requests,responses,auth,filters"`, DumpHeaders | DumpBodies | DumpRequests | DumpResponses | DumpAuth | DumpFilters, ""}, - {`"headers,bodies,unknown,auth"`, 0, "Unknown dump flag \"unknown\""}, + {`"headers,bodies,unknown,auth"`, 0, "unknown dump flag \"unknown\""}, {`0`, DumpFlags(0), ""}, {strconv.Itoa(int(DumpBodies)), DumpBodies, ""}, {strconv.Itoa(int(DumpBodies | DumpHeaders | DumpAuth)), DumpBodies | DumpHeaders | DumpAuth, ""}, diff --git a/fs/filter/filter.go b/fs/filter/filter.go index e35018fa5..741580fff 100644 --- a/fs/filter/filter.go +++ b/fs/filter/filter.go @@ -199,7 +199,7 @@ func NewFilter(opt *Opt) (f *Filter, err error) { for _, rule := range f.Opt.FilesFrom { if !inActive { - return nil, fmt.Errorf("The usage of --files-from overrides all other filters, it should be used alone or with --files-from-raw") + return nil, fmt.Errorf("the usage of --files-from overrides all other filters, it should be used alone or with --files-from-raw") } f.initAddFile() // init to show --files-from set even if no files within err := forEachLine(rule, false, func(line string) error { @@ -214,7 +214,7 @@ func NewFilter(opt *Opt) (f *Filter, err error) { // --files-from-raw can be used with --files-from, hence we do // not need to get the value of f.InActive again if !inActive { - return nil, fmt.Errorf("The usage of --files-from-raw overrides all other filters, it should be used alone or with 
--files-from") + return nil, fmt.Errorf("the usage of --files-from-raw overrides all other filters, it should be used alone or with --files-from") } f.initAddFile() // init to show --files-from set even if no files within err := forEachLine(rule, true, func(line string) error { diff --git a/fs/filter/filter_test.go b/fs/filter/filter_test.go index 81a01af3d..27da120c4 100644 --- a/fs/filter/filter_test.go +++ b/fs/filter/filter_test.go @@ -62,7 +62,7 @@ func TestNewFilterForbiddenMixOfFilesFromAndFilterRule(t *testing.T) { _, err := NewFilter(&Opt) require.Error(t, err) - require.Contains(t, err.Error(), "The usage of --files-from overrides all other filters") + require.Contains(t, err.Error(), "the usage of --files-from overrides all other filters") } func TestNewFilterForbiddenMixOfFilesFromRawAndFilterRule(t *testing.T) { @@ -85,7 +85,7 @@ func TestNewFilterForbiddenMixOfFilesFromRawAndFilterRule(t *testing.T) { _, err := NewFilter(&Opt) require.Error(t, err) - require.Contains(t, err.Error(), "The usage of --files-from-raw overrides all other filters") + require.Contains(t, err.Error(), "the usage of --files-from-raw overrides all other filters") } func TestNewFilterWithFilesFromAlone(t *testing.T) { diff --git a/fs/hash/hash.go b/fs/hash/hash.go index 48ecdcd4c..93b1de3b2 100644 --- a/fs/hash/hash.go +++ b/fs/hash/hash.go @@ -153,7 +153,7 @@ func (h *Type) Set(s string) error { *h = hash.hashType return nil } - return fmt.Errorf("Unknown hash type %q", s) + return fmt.Errorf("unknown hash type %q", s) } // Type of the value diff --git a/fs/log.go b/fs/log.go index 423352344..8328b9106 100644 --- a/fs/log.go +++ b/fs/log.go @@ -61,7 +61,7 @@ func (l *LogLevel) Set(s string) error { return nil } } - return fmt.Errorf("Unknown log level %q", s) + return fmt.Errorf("unknown log level %q", s) } // Type of the value @@ -73,7 +73,7 @@ func (l *LogLevel) Type() string { func (l *LogLevel) UnmarshalJSON(in []byte) error { return UnmarshalJSONFlag(in, l, func(i int64) error { if i < 0 || i >= int64(LogLevel(len(logLevelToString))) { - return fmt.Errorf("Unknown log level %d", i) + return fmt.Errorf("unknown log level %d", i) } *l = (LogLevel)(i) return nil diff --git a/fs/open_options.go b/fs/open_options.go index 938e38d04..e357a9a69 100644 --- a/fs/open_options.go +++ b/fs/open_options.go @@ -72,28 +72,28 @@ func (o *RangeOption) Header() (key string, value string) { func ParseRangeOption(s string) (po *RangeOption, err error) { const preamble = "bytes=" if !strings.HasPrefix(s, preamble) { - return nil, errors.New("Range: header invalid: doesn't start with " + preamble) + return nil, errors.New("range: header invalid: doesn't start with " + preamble) } s = s[len(preamble):] if strings.ContainsRune(s, ',') { - return nil, errors.New("Range: header invalid: contains multiple ranges which isn't supported") + return nil, errors.New("range: header invalid: contains multiple ranges which isn't supported") } dash := strings.IndexRune(s, '-') if dash < 0 { - return nil, errors.New("Range: header invalid: contains no '-'") + return nil, errors.New("range: header invalid: contains no '-'") } start, end := strings.TrimSpace(s[:dash]), strings.TrimSpace(s[dash+1:]) o := RangeOption{Start: -1, End: -1} if start != "" { o.Start, err = strconv.ParseInt(start, 10, 64) if err != nil || o.Start < 0 { - return nil, errors.New("Range: header invalid: bad start") + return nil, errors.New("range: header invalid: bad start") } } if end != "" { o.End, err = strconv.ParseInt(end, 10, 64) if err != nil || o.End < 0 { 
- return nil, errors.New("Range: header invalid: bad end") + return nil, errors.New("range: header invalid: bad end") } } return &o, nil diff --git a/fs/operations/check.go b/fs/operations/check.go index a7a52e299..5cb3b0cf9 100644 --- a/fs/operations/check.go +++ b/fs/operations/check.go @@ -80,7 +80,7 @@ func (c *checkMarch) DstOnly(dst fs.DirEntry) (recurse bool) { if c.opt.OneWay { return false } - err := fmt.Errorf("File not in %v", c.opt.Fsrc) + err := fmt.Errorf("file not in %v", c.opt.Fsrc) fs.Errorf(dst, "%v", err) _ = fs.CountError(err) atomic.AddInt32(&c.differences, 1) @@ -102,7 +102,7 @@ func (c *checkMarch) DstOnly(dst fs.DirEntry) (recurse bool) { func (c *checkMarch) SrcOnly(src fs.DirEntry) (recurse bool) { switch src.(type) { case fs.Object: - err := fmt.Errorf("File not in %v", c.opt.Fdst) + err := fmt.Errorf("file not in %v", c.opt.Fdst) fs.Errorf(src, "%v", err) _ = fs.CountError(err) atomic.AddInt32(&c.differences, 1) @@ -125,7 +125,7 @@ func (c *checkMarch) checkIdentical(ctx context.Context, dst, src fs.Object) (di tr.Done(ctx, err) }() if sizeDiffers(ctx, src, dst) { - err = fmt.Errorf("Sizes differ") + err = fmt.Errorf("sizes differ") fs.Errorf(src, "%v", err) return true, false, nil } @@ -424,7 +424,7 @@ func CheckSum(ctx context.Context, fsrc, fsum fs.Fs, sumFile string, hashType ha continue } // filesystem missed the file, sum wasn't consumed - err := fmt.Errorf("File not in %v", opt.Fdst) + err := fmt.Errorf("file not in %v", opt.Fdst) fs.Errorf(filename, "%v", err) _ = fs.CountError(err) if lastErr == nil { diff --git a/fs/operations/lsjson.go b/fs/operations/lsjson.go index 5af5a4295..144a47bb6 100644 --- a/fs/operations/lsjson.go +++ b/fs/operations/lsjson.go @@ -122,7 +122,7 @@ func newListJSON(ctx context.Context, fsrc fs.Fs, remote string, opt *ListJSONOp return nil, fmt.Errorf("ListJSON failed to load config for crypt remote: %w", err) } if fsInfo.Name != "crypt" { - return nil, errors.New("The remote needs to be of type \"crypt\"") + return nil, errors.New("the remote needs to be of type \"crypt\"") } lj.cipher, err = crypt.NewCipher(config) if err != nil { diff --git a/fs/operations/operations.go b/fs/operations/operations.go index 5eccc44f7..bdeeaf036 100644 --- a/fs/operations/operations.go +++ b/fs/operations/operations.go @@ -1407,7 +1407,7 @@ func Rcat(ctx context.Context, fdst fs.Fs, dstFileName string, in io.ReadCloser, fs.Debugf(fdst, "Target remote doesn't support streaming uploads, creating temporary local FS to spool file") tmpLocalFs, err := fs.TemporaryLocalFs(ctx) if err != nil { - return nil, fmt.Errorf("Failed to create temporary local FS to spool file: %w", err) + return nil, fmt.Errorf("failed to create temporary local FS to spool file: %w", err) } defer func() { err := Purge(ctx, tmpLocalFs, "") @@ -1523,7 +1523,7 @@ func GetCompareDest(ctx context.Context) (CompareDest []fs.Fs, err error) { ci := fs.GetConfig(ctx) CompareDest, err = cache.GetArr(ctx, ci.CompareDest) if err != nil { - return nil, fserrors.FatalError(fmt.Errorf("Failed to make fs for --compare-dest %q: %v", ci.CompareDest, err)) + return nil, fserrors.FatalError(fmt.Errorf("failed to make fs for --compare-dest %q: %w", ci.CompareDest, err)) } return CompareDest, nil } @@ -1562,7 +1562,7 @@ func GetCopyDest(ctx context.Context, fdst fs.Fs) (CopyDest []fs.Fs, err error) ci := fs.GetConfig(ctx) CopyDest, err = cache.GetArr(ctx, ci.CopyDest) if err != nil { - return nil, fserrors.FatalError(fmt.Errorf("Failed to make fs for --copy-dest %q: %v", ci.CopyDest, err)) + 
return nil, fserrors.FatalError(fmt.Errorf("failed to make fs for --copy-dest %q: %w", ci.CopyDest, err)) } if !SameConfigArr(fdst, CopyDest) { return nil, fserrors.FatalError(errors.New("parameter to --copy-dest has to be on the same remote as destination")) @@ -1777,7 +1777,7 @@ func copyURLFn(ctx context.Context, dstFileName string, url string, autoFilename _, params, err := mime.ParseMediaType(resp.Header.Get("Content-Disposition")) headerFilename := path.Base(strings.Replace(params["filename"], "\\", "/", -1)) if err != nil || headerFilename == "" { - return fmt.Errorf("copyurl failed: filename not found in the Content-Dispoition header") + return fmt.Errorf("CopyURL failed: filename not found in the Content-Dispoition header") } fs.Debugf(headerFilename, "filename found in Content-Disposition header.") return fn(ctx, headerFilename, resp.Body, resp.ContentLength, modTime) @@ -1822,7 +1822,7 @@ func BackupDir(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, srcFileName string) if ci.BackupDir != "" { backupDir, err = cache.Get(ctx, ci.BackupDir) if err != nil { - return nil, fserrors.FatalError(fmt.Errorf("Failed to make fs for --backup-dir %q: %v", ci.BackupDir, err)) + return nil, fserrors.FatalError(fmt.Errorf("failed to make fs for --backup-dir %q: %w", ci.BackupDir, err)) } if !SameConfig(fdst, backupDir) { return nil, fserrors.FatalError(errors.New("parameter to --backup-dir has to be on the same remote as destination")) diff --git a/fs/operations/operations_test.go b/fs/operations/operations_test.go index 04b26e8b8..02a44d385 100644 --- a/fs/operations/operations_test.go +++ b/fs/operations/operations_test.go @@ -1696,7 +1696,7 @@ func TestCopyFileMaxTransfer(t *testing.T) { accounting.Stats(ctx).ResetCounters() err = operations.CopyFile(ctx, r.Fremote, r.Flocal, file2.Path, file2.Path) require.NotNil(t, err, "Did not get expected max transfer limit error") - assert.Contains(t, err.Error(), "Max transfer limit reached") + assert.Contains(t, err.Error(), "max transfer limit reached") assert.True(t, fserrors.IsFatalError(err), fmt.Sprintf("Not fatal error: %v: %#v:", err, err)) r.CheckLocalItems(t, file1, file2, file3, file4) r.CheckRemoteItems(t, file1) @@ -1708,7 +1708,7 @@ func TestCopyFileMaxTransfer(t *testing.T) { accounting.Stats(ctx).ResetCounters() err = operations.CopyFile(ctx, r.Fremote, r.Flocal, file3.Path, file3.Path) require.NotNil(t, err) - assert.Contains(t, err.Error(), "Max transfer limit reached") + assert.Contains(t, err.Error(), "max transfer limit reached") assert.True(t, fserrors.IsNoRetryError(err)) r.CheckLocalItems(t, file1, file2, file3, file4) r.CheckRemoteItems(t, file1) diff --git a/fs/rc/internal.go b/fs/rc/internal.go index 10077ae09..24e67eabb 100644 --- a/fs/rc/internal.go +++ b/fs/rc/internal.go @@ -475,7 +475,7 @@ func rcRunCommand(ctx context.Context, in Params) (out Params, err error) { cmd.Stdout = httpResponse cmd.Stderr = httpResponse } else { - return nil, fmt.Errorf("Unknown returnType %q", returnType) + return nil, fmt.Errorf("unknown returnType %q", returnType) } err = cmd.Run() diff --git a/fs/rc/rcserver/rcserver_test.go b/fs/rc/rcserver/rcserver_test.go index f75cfe52f..34181b631 100644 --- a/fs/rc/rcserver/rcserver_test.go +++ b/fs/rc/rcserver/rcserver_test.go @@ -497,7 +497,7 @@ func TestRCWithAuth(t *testing.T) { ContentType: "application/x-www-form-urlencoded", Status: http.StatusInternalServerError, Expected: `{ - "error": "Unknown returnType \"POTATO\"", + "error": "unknown returnType \"POTATO\"", "input": { "command": "version", 
"returnType": "POTATO" diff --git a/fs/rc/webgui/webgui.go b/fs/rc/webgui/webgui.go index ea37382ee..8153024d1 100644 --- a/fs/rc/webgui/webgui.go +++ b/fs/rc/webgui/webgui.go @@ -63,7 +63,7 @@ func CheckAndDownloadWebGUIRelease(checkUpdate bool, forceUpdate bool, fetchURL // Get the latest release details WebUIURL, tag, size, err := GetLatestReleaseURL(fetchURL) if err != nil { - return fmt.Errorf("Error checking for web gui release update, skipping update: %w", err) + return fmt.Errorf("error checking for web gui release update, skipping update: %w", err) } dat, err := ioutil.ReadFile(tagPath) tagsMatch := false diff --git a/fs/sync/sync.go b/fs/sync/sync.go index 58e74e22a..052b867e8 100644 --- a/fs/sync/sync.go +++ b/fs/sync/sync.go @@ -825,7 +825,7 @@ func (s *syncCopyMove) tryRename(src fs.Object) bool { // errorMaxDurationReached defines error when transfer duration is reached // Used for checking on exit and matching to correct exit code. -var errorMaxDurationReached = fserrors.FatalError(errors.New("Max transfer duration reached as set by --max-duration")) +var errorMaxDurationReached = fserrors.FatalError(errors.New("max transfer duration reached as set by --max-duration")) // Syncs fsrc into fdst // diff --git a/fstest/mockobject/mockobject.go b/fstest/mockobject/mockobject.go index 0d0c4516c..084536b85 100644 --- a/fstest/mockobject/mockobject.go +++ b/fstest/mockobject/mockobject.go @@ -140,7 +140,7 @@ func (o *ContentMockObject) Open(ctx context.Context, options ...fs.OpenOption) offset, limit = x.Decode(size) default: if option.Mandatory() { - return nil, fmt.Errorf("Unsupported mandatory option: %v", option) + return nil, fmt.Errorf("unsupported mandatory option: %v", option) } } } diff --git a/fstest/test_all/clean.go b/fstest/test_all/clean.go index c12a4839a..6da23b7e1 100644 --- a/fstest/test_all/clean.go +++ b/fstest/test_all/clean.go @@ -56,7 +56,7 @@ func cleanFs(ctx context.Context, remote string, cleanup bool) error { } err = operations.Purge(ctx, dir, "") if err != nil { - err = fmt.Errorf("Purge failed: %w", err) + err = fmt.Errorf("purge failed: %w", err) lastErr = err fs.Errorf(dir, "%v", err) return nil diff --git a/lib/http/http.go b/lib/http/http.go index ac5befd37..1ff7c1148 100644 --- a/lib/http/http.go +++ b/lib/http/http.go @@ -122,7 +122,7 @@ func useSSL(opt Options) bool { func NewServer(listeners, tlsListeners []net.Listener, opt Options) (Server, error) { // Validate input if len(listeners) == 0 && len(tlsListeners) == 0 { - return nil, errors.New("Can't create server without listeners") + return nil, errors.New("can't create server without listeners") } // Prepare TLS config @@ -130,12 +130,12 @@ func NewServer(listeners, tlsListeners []net.Listener, opt Options) (Server, err useSSL := useSSL(opt) if (len(opt.SslCertBody) > 0) != (len(opt.SslKeyBody) > 0) { - err := errors.New("Need both SslCertBody and SslKeyBody to use SSL") + err := errors.New("need both SslCertBody and SslKeyBody to use SSL") log.Fatalf(err.Error()) return nil, err } if (opt.SslCert != "") != (opt.SslKey != "") { - err := errors.New("Need both -cert and -key to use SSL") + err := errors.New("need both -cert and -key to use SSL") log.Fatalf(err.Error()) return nil, err } @@ -156,12 +156,12 @@ func NewServer(listeners, tlsListeners []net.Listener, opt Options) (Server, err Certificates: []tls.Certificate{cert}, } } else if len(listeners) == 0 && len(tlsListeners) != 0 { - return nil, errors.New("No SslKey or non-tlsListeners") + return nil, errors.New("no SslKey or 
non-tlsListeners") } if opt.ClientCA != "" { if !useSSL { - err := errors.New("Can't use --client-ca without --cert and --key") + err := errors.New("can't use --client-ca without --cert and --key") log.Fatalf(err.Error()) return nil, err } @@ -172,7 +172,7 @@ func NewServer(listeners, tlsListeners []net.Listener, opt Options) (Server, err return nil, err } if !certpool.AppendCertsFromPEM(pem) { - err := errors.New("Can't parse client certificate authority") + err := errors.New("can't parse client certificate authority") log.Fatalf(err.Error()) return nil, err } diff --git a/lib/jwtutil/jwtutil.go b/lib/jwtutil/jwtutil.go index 8473c45bd..af99ed3f8 100644 --- a/lib/jwtutil/jwtutil.go +++ b/lib/jwtutil/jwtutil.go @@ -78,7 +78,7 @@ func Config(id, name string, claims *jws.ClaimSet, header *jws.Header, queryPara result := &response{} err = json.NewDecoder(strings.NewReader(s)).Decode(result) if result.AccessToken == "" && err == nil { - err = errors.New("No AccessToken in Response") + err = errors.New("no AccessToken in Response") } if err != nil { return fmt.Errorf("jwtutil: failed to get token: %w", err) diff --git a/lib/rest/url.go b/lib/rest/url.go index 4a1d71390..8f443d59d 100644 --- a/lib/rest/url.go +++ b/lib/rest/url.go @@ -11,7 +11,7 @@ import ( func URLJoin(base *url.URL, path string) (*url.URL, error) { rel, err := url.Parse(path) if err != nil { - return nil, fmt.Errorf("Error parsing %q as URL: %w", path, err) + return nil, fmt.Errorf("error parsing %q as URL: %w", path, err) } return base.ResolveReference(rel), nil } diff --git a/vfs/file.go b/vfs/file.go index 4c4fc214a..2d3199f13 100644 --- a/vfs/file.go +++ b/vfs/file.go @@ -400,7 +400,7 @@ func (f *File) _applyPendingModTime() error { defer func() { f.pendingModTime = time.Time{} }() if f.o == nil { - return errors.New("Cannot apply ModTime, file object is not available") + return errors.New("cannot apply ModTime, file object is not available") } dt := f.pendingModTime.Sub(f.o.ModTime(context.Background())) diff --git a/vfs/vfscommon/cachemode.go b/vfs/vfscommon/cachemode.go index a7f35fe9c..a2d91bd4e 100644 --- a/vfs/vfscommon/cachemode.go +++ b/vfs/vfscommon/cachemode.go @@ -40,7 +40,7 @@ func (l *CacheMode) Set(s string) error { return nil } } - return fmt.Errorf("Unknown cache mode level %q", s) + return fmt.Errorf("unknown cache mode level %q", s) } // Type of the value @@ -52,7 +52,7 @@ func (l *CacheMode) Type() string { func (l *CacheMode) UnmarshalJSON(in []byte) error { return fs.UnmarshalJSONFlag(in, l, func(i int64) error { if i < 0 || i >= int64(len(cacheModeToString)) { - return fmt.Errorf("Unknown cache mode level %d", i) + return fmt.Errorf("unknown cache mode level %d", i) } *l = CacheMode(i) return nil diff --git a/vfs/vfsflags/filemode.go b/vfs/vfsflags/filemode.go index 50a8bc423..b1c2e8bcd 100644 --- a/vfs/vfsflags/filemode.go +++ b/vfs/vfsflags/filemode.go @@ -20,7 +20,7 @@ func (x *FileMode) String() string { func (x *FileMode) Set(s string) error { i, err := strconv.ParseInt(s, 8, 64) if err != nil { - return fmt.Errorf("Bad FileMode - must be octal digits: %w", err) + return fmt.Errorf("bad FileMode - must be octal digits: %w", err) } *x.Mode = (os.FileMode)(i) return nil