diff --git a/backend/amazonclouddrive/amazonclouddrive.go b/backend/amazonclouddrive/amazonclouddrive.go
index cf98d4ba9..836cf4be2 100644
--- a/backend/amazonclouddrive/amazonclouddrive.go
+++ b/backend/amazonclouddrive/amazonclouddrive.go
@@ -881,7 +881,7 @@ func (f *Fs) Precision() time.Duration {
 
 // Hashes returns the supported hash sets.
 func (f *Fs) Hashes() hash.Set {
-	return hash.Set(hash.HashMD5)
+	return hash.Set(hash.MD5)
 }
 
 // Copy src to this remote using server side copy operations.
@@ -938,8 +938,8 @@ func (o *Object) Remote() string {
 
 // Hash returns the Md5sum of an object returning a lowercase hex string
 func (o *Object) Hash(t hash.Type) (string, error) {
-	if t != hash.HashMD5 {
-		return "", hash.ErrHashUnsupported
+	if t != hash.MD5 {
+		return "", hash.ErrUnsupported
 	}
 	if o.info.ContentProperties != nil && o.info.ContentProperties.Md5 != nil {
 		return *o.info.ContentProperties.Md5, nil
diff --git a/backend/azureblob/azureblob.go b/backend/azureblob/azureblob.go
index b878048ae..20d09c49f 100644
--- a/backend/azureblob/azureblob.go
+++ b/backend/azureblob/azureblob.go
@@ -629,7 +629,7 @@ func (f *Fs) Precision() time.Duration {
 
 // Hashes returns the supported hash sets.
 func (f *Fs) Hashes() hash.Set {
-	return hash.Set(hash.HashMD5)
+	return hash.Set(hash.MD5)
 }
 
 // Purge deletes all the files and directories including the old versions.
@@ -697,8 +697,8 @@ func (o *Object) Remote() string {
 
 // Hash returns the MD5 of an object returning a lowercase hex string
 func (o *Object) Hash(t hash.Type) (string, error) {
-	if t != hash.HashMD5 {
-		return "", hash.ErrHashUnsupported
+	if t != hash.MD5 {
+		return "", hash.ErrUnsupported
 	}
 	// Convert base64 encoded md5 into lower case hex
 	if o.md5 == "" {
@@ -1065,7 +1065,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 	size := src.Size()
 	blob := o.getBlobWithModTime(src.ModTime())
 	blob.Properties.ContentType = fs.MimeType(o)
-	if sourceMD5, _ := src.Hash(hash.HashMD5); sourceMD5 != "" {
+	if sourceMD5, _ := src.Hash(hash.MD5); sourceMD5 != "" {
 		sourceMD5bytes, err := hex.DecodeString(sourceMD5)
 		if err == nil {
 			blob.Properties.ContentMD5 = base64.StdEncoding.EncodeToString(sourceMD5bytes)
diff --git a/backend/b2/b2.go b/backend/b2/b2.go
index dde5bc9f1..f68bcbfe6 100644
--- a/backend/b2/b2.go
+++ b/backend/b2/b2.go
@@ -922,7 +922,7 @@ func (f *Fs) CleanUp() error {
 
 // Hashes returns the supported hash sets.
 func (f *Fs) Hashes() hash.Set {
-	return hash.Set(hash.HashSHA1)
+	return hash.Set(hash.SHA1)
 }
 
 // ------------------------------------------------------------
@@ -947,8 +947,8 @@ func (o *Object) Remote() string {
 
 // Hash returns the Sha-1 of an object returning a lowercase hex string
 func (o *Object) Hash(t hash.Type) (string, error) {
-	if t != hash.HashSHA1 {
-		return "", hash.ErrHashUnsupported
+	if t != hash.SHA1 {
+		return "", hash.ErrUnsupported
 	}
 	if o.sha1 == "" {
 		// Error is logged in readMetaData
@@ -1286,7 +1286,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 
 	modTime := src.ModTime()
 
-	calculatedSha1, _ := src.Hash(hash.HashSHA1)
+	calculatedSha1, _ := src.Hash(hash.SHA1)
 	if calculatedSha1 == "" {
 		calculatedSha1 = "hex_digits_at_end"
 		har := newHashAppendingReader(in, sha1.New())
diff --git a/backend/b2/upload.go b/backend/b2/upload.go
index 6e88055e3..399aaa6a2 100644
--- a/backend/b2/upload.go
+++ b/backend/b2/upload.go
@@ -115,7 +115,7 @@ func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *lar
 		},
 	}
 	// Set the SHA1 if known
-	if calculatedSha1, err := src.Hash(hash.HashSHA1); err == nil && calculatedSha1 != "" {
+	if calculatedSha1, err := src.Hash(hash.SHA1); err == nil && calculatedSha1 != "" {
 		request.Info[sha1Key] = calculatedSha1
 	}
 	var response api.StartLargeFileResponse
diff --git a/backend/box/box.go b/backend/box/box.go
index 834e02364..58b3c8637 100644
--- a/backend/box/box.go
+++ b/backend/box/box.go
@@ -831,7 +831,7 @@ func (f *Fs) DirCacheFlush() {
 
 // Hashes returns the supported hash sets.
 func (f *Fs) Hashes() hash.Set {
-	return hash.Set(hash.HashSHA1)
+	return hash.Set(hash.SHA1)
 }
 
 // ------------------------------------------------------------
@@ -861,8 +861,8 @@ func (o *Object) srvPath() string {
 
 // Hash returns the SHA-1 of an object returning a lowercase hex string
 func (o *Object) Hash(t hash.Type) (string, error) {
-	if t != hash.HashSHA1 {
-		return "", hash.ErrHashUnsupported
+	if t != hash.SHA1 {
+		return "", hash.ErrUnsupported
 	}
 	return o.sha1, nil
 }
diff --git a/backend/crypt/crypt.go b/backend/crypt/crypt.go
index d395385d6..63ce31221 100644
--- a/backend/crypt/crypt.go
+++ b/backend/crypt/crypt.go
@@ -310,7 +310,7 @@ func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption
 
 // Hashes returns the supported hash sets.
 func (f *Fs) Hashes() hash.Set {
-	return hash.Set(hash.HashNone)
+	return hash.Set(hash.None)
 }
 
 // Mkdir makes the directory (container, bucket)
@@ -563,7 +563,7 @@ func (o *Object) Size() int64 {
 // Hash returns the selected checksum of the file
 // If no checksum is available it returns ""
 func (o *Object) Hash(ht hash.Type) (string, error) {
-	return "", hash.ErrHashUnsupported
+	return "", hash.ErrUnsupported
 }
 
 // UnWrap returns the wrapped Object
diff --git a/backend/drive/drive.go b/backend/drive/drive.go
index 7020bc9cb..fdf8289ad 100644
--- a/backend/drive/drive.go
+++ b/backend/drive/drive.go
@@ -1194,7 +1194,7 @@ func (f *Fs) DirCacheFlush() {
 
 // Hashes returns the supported hash sets.
 func (f *Fs) Hashes() hash.Set {
-	return hash.Set(hash.HashMD5)
+	return hash.Set(hash.MD5)
 }
 
 // ------------------------------------------------------------
@@ -1219,8 +1219,8 @@ func (o *Object) Remote() string {
 
 // Hash returns the Md5sum of an object returning a lowercase hex string
 func (o *Object) Hash(t hash.Type) (string, error) {
-	if t != hash.HashMD5 {
-		return "", hash.ErrHashUnsupported
+	if t != hash.MD5 {
+		return "", hash.ErrUnsupported
 	}
 	return o.md5sum, nil
 }
diff --git a/backend/dropbox/dropbox.go b/backend/dropbox/dropbox.go
index 0c91d5f63..89d9567c4 100644
--- a/backend/dropbox/dropbox.go
+++ b/backend/dropbox/dropbox.go
@@ -685,7 +685,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 
 // Hashes returns the supported hash sets.
 func (f *Fs) Hashes() hash.Set {
-	return hash.Set(hash.HashDropbox)
+	return hash.Set(hash.Dropbox)
 }
 
 // ------------------------------------------------------------
@@ -710,8 +710,8 @@ func (o *Object) Remote() string {
 
 // Hash returns the dropbox special hash
 func (o *Object) Hash(t hash.Type) (string, error) {
-	if t != hash.HashDropbox {
-		return "", hash.ErrHashUnsupported
+	if t != hash.Dropbox {
+		return "", hash.ErrUnsupported
 	}
 	err := o.readMetaData()
 	if err != nil {
diff --git a/backend/ftp/ftp.go b/backend/ftp/ftp.go
index 7e84ae671..eb7da5c1f 100644
--- a/backend/ftp/ftp.go
+++ b/backend/ftp/ftp.go
@@ -569,7 +569,7 @@ func (o *Object) Remote() string {
 
 // Hash returns the hash of an object returning a lowercase hex string
 func (o *Object) Hash(t hash.Type) (string, error) {
-	return "", hash.ErrHashUnsupported
+	return "", hash.ErrUnsupported
 }
 
 // Size returns the size of an object in bytes
diff --git a/backend/googlecloudstorage/googlecloudstorage.go b/backend/googlecloudstorage/googlecloudstorage.go
index 426f31ab4..07c58fe1f 100644
--- a/backend/googlecloudstorage/googlecloudstorage.go
+++ b/backend/googlecloudstorage/googlecloudstorage.go
@@ -676,7 +676,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 
 // Hashes returns the supported hash sets.
 func (f *Fs) Hashes() hash.Set {
-	return hash.Set(hash.HashMD5)
+	return hash.Set(hash.MD5)
 }
 
 // ------------------------------------------------------------
@@ -701,8 +701,8 @@ func (o *Object) Remote() string {
 
 // Hash returns the Md5sum of an object returning a lowercase hex string
 func (o *Object) Hash(t hash.Type) (string, error) {
-	if t != hash.HashMD5 {
-		return "", hash.ErrHashUnsupported
+	if t != hash.MD5 {
+		return "", hash.ErrUnsupported
 	}
 	return o.md5sum, nil
 }
diff --git a/backend/http/http.go b/backend/http/http.go
index dcce744a5..4c436fb36 100644
--- a/backend/http/http.go
+++ b/backend/http/http.go
@@ -367,7 +367,7 @@ func (o *Object) Remote() string {
 
 // Hash returns "" since HTTP (in Go or OpenSSH) doesn't support remote calculation of hashes
 func (o *Object) Hash(r hash.Type) (string, error) {
-	return "", hash.ErrHashUnsupported
+	return "", hash.ErrUnsupported
 }
 
 // Size returns the size in bytes of the remote http file
@@ -439,7 +439,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 
 // Hashes returns hash.HashNone to indicate remote hashing is unavailable
 func (f *Fs) Hashes() hash.Set {
-	return hash.Set(hash.HashNone)
+	return hash.Set(hash.None)
 }
 
 // Mkdir makes the root directory of the Fs object
diff --git a/backend/local/local.go b/backend/local/local.go
index 8480d3ede..080947432 100644
--- a/backend/local/local.go
+++ b/backend/local/local.go
@@ -539,7 +539,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 
 // Hashes returns the supported hash sets.
 func (f *Fs) Hashes() hash.Set {
-	return hash.SupportedHashes
+	return hash.Supported
 }
 
 // ------------------------------------------------------------
@@ -676,7 +676,7 @@ func (file *localOpenFile) Close() (err error) {
 // Open an object for read
 func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	var offset int64
-	hashes := hash.SupportedHashes
+	hashes := hash.Supported
 	for _, option := range options {
 		switch x := option.(type) {
 		case *fs.SeekOption:
@@ -721,7 +721,7 @@ func (o *Object) mkdirAll() error {
 
 // Update the object from in with modTime and size
 func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
-	hashes := hash.SupportedHashes
+	hashes := hash.Supported
 	for _, option := range options {
 		switch x := option.(type) {
 		case *fs.HashesOption:
diff --git a/backend/onedrive/onedrive.go b/backend/onedrive/onedrive.go
index b36008149..a3a709b2c 100644
--- a/backend/onedrive/onedrive.go
+++ b/backend/onedrive/onedrive.go
@@ -917,7 +917,7 @@ func (f *Fs) DirCacheFlush() {
 
 // Hashes returns the supported hash sets.
 func (f *Fs) Hashes() hash.Set {
-	return hash.Set(hash.HashSHA1)
+	return hash.Set(hash.SHA1)
 }
 
 // ------------------------------------------------------------
@@ -947,8 +947,8 @@ func (o *Object) srvPath() string {
 
 // Hash returns the SHA-1 of an object returning a lowercase hex string
 func (o *Object) Hash(t hash.Type) (string, error) {
-	if t != hash.HashSHA1 {
-		return "", hash.ErrHashUnsupported
+	if t != hash.SHA1 {
+		return "", hash.ErrUnsupported
 	}
 	return o.sha1, nil
 }
diff --git a/backend/pcloud/pcloud.go b/backend/pcloud/pcloud.go
index e8b4ae97d..10e4d1621 100644
--- a/backend/pcloud/pcloud.go
+++ b/backend/pcloud/pcloud.go
@@ -813,7 +813,7 @@ func (f *Fs) DirCacheFlush() {
 
 // Hashes returns the supported hash sets.
 func (f *Fs) Hashes() hash.Set {
-	return hash.Set(hash.HashMD5 | hash.HashSHA1)
+	return hash.Set(hash.MD5 | hash.SHA1)
 }
 
 // ------------------------------------------------------------
@@ -860,8 +860,8 @@ func (o *Object) getHashes() (err error) {
 
 // Hash returns the SHA-1 of an object returning a lowercase hex string
 func (o *Object) Hash(t hash.Type) (string, error) {
-	if t != hash.HashMD5 && t != hash.HashSHA1 {
-		return "", hash.ErrHashUnsupported
+	if t != hash.MD5 && t != hash.SHA1 {
+		return "", hash.ErrUnsupported
 	}
 	if o.md5 == "" && o.sha1 == "" {
 		err := o.getHashes()
@@ -869,7 +869,7 @@ func (o *Object) Hash(t hash.Type) (string, error) {
 			return "", errors.Wrap(err, "failed to get hash")
 		}
 	}
-	if t == hash.HashMD5 {
+	if t == hash.MD5 {
 		return o.md5, nil
 	}
 	return o.sha1, nil
diff --git a/backend/qingstor/qingstor.go b/backend/qingstor/qingstor.go
index 91c724a8f..2b5213410 100644
--- a/backend/qingstor/qingstor.go
+++ b/backend/qingstor/qingstor.go
@@ -307,7 +307,7 @@ func (f *Fs) Precision() time.Duration {
 
 // Hashes returns the supported hash sets.
 func (f *Fs) Hashes() hash.Set {
-	return hash.Set(hash.HashMD5)
+	return hash.Set(hash.MD5)
 	//return hash.HashSet(hash.HashNone)
 }
 
@@ -930,8 +930,8 @@ var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)
 // Hash returns the selected checksum of the file
 // If no checksum is available it returns ""
 func (o *Object) Hash(t hash.Type) (string, error) {
-	if t != hash.HashMD5 {
-		return "", hash.ErrHashUnsupported
+	if t != hash.MD5 {
+		return "", hash.ErrUnsupported
 	}
 	etag := strings.Trim(strings.ToLower(o.etag), `"`)
 	// Check the etag is a valid md5sum
diff --git a/backend/s3/s3.go b/backend/s3/s3.go
index 2a084ef9a..201e6a2b1 100644
--- a/backend/s3/s3.go
+++ b/backend/s3/s3.go
@@ -810,7 +810,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 
 // Hashes returns the supported hash sets.
 func (f *Fs) Hashes() hash.Set {
-	return hash.Set(hash.HashMD5)
+	return hash.Set(hash.MD5)
 }
 
 // ------------------------------------------------------------
@@ -837,8 +837,8 @@ var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)
 
 // Hash returns the Md5sum of an object returning a lowercase hex string
 func (o *Object) Hash(t hash.Type) (string, error) {
-	if t != hash.HashMD5 {
-		return "", hash.ErrHashUnsupported
+	if t != hash.MD5 {
+		return "", hash.ErrUnsupported
 	}
 	hash := strings.Trim(strings.ToLower(o.etag), `"`)
 	// Check the etag is a valid md5sum
@@ -1032,7 +1032,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 	}
 
 	if size > uploader.PartSize {
-		hash, err := src.Hash(hash.HashMD5)
+		hash, err := src.Hash(hash.MD5)
 
 		if err == nil && matchMd5.MatchString(hash) {
 			hashBytes, err := hex.DecodeString(hash)
diff --git a/backend/sftp/sftp.go b/backend/sftp/sftp.go
index 907047fe2..91afe6b5a 100644
--- a/backend/sftp/sftp.go
+++ b/backend/sftp/sftp.go
@@ -642,18 +642,18 @@ func (f *Fs) Hashes() hash.Set {
 
 	hashcheckDisabled := config.FileGetBool(f.name, "disable_hashcheck")
 	if hashcheckDisabled {
-		return hash.Set(hash.HashNone)
+		return hash.Set(hash.None)
 	}
 
 	c, err := f.getSftpConnection()
 	if err != nil {
 		fs.Errorf(f, "Couldn't get SSH connection to figure out Hashes: %v", err)
-		return hash.Set(hash.HashNone)
+		return hash.Set(hash.None)
 	}
 	defer f.putSftpConnection(&c, err)
 	session, err := c.sshClient.NewSession()
 	if err != nil {
-		return hash.Set(hash.HashNone)
+		return hash.Set(hash.None)
 	}
 	sha1Output, _ := session.Output("echo 'abc' | sha1sum")
 	expectedSha1 := "03cfd743661f07975fa2f1220c5194cbaff48451"
@@ -661,7 +661,7 @@ func (f *Fs) Hashes() hash.Set {
 
 	session, err = c.sshClient.NewSession()
 	if err != nil {
-		return hash.Set(hash.HashNone)
+		return hash.Set(hash.None)
 	}
 	md5Output, _ := session.Output("echo 'abc' | md5sum")
 	expectedMd5 := "0bee89b07a248e27c83fc3d5951213c1"
@@ -672,13 +672,13 @@ func (f *Fs) Hashes() hash.Set {
 
 	set := hash.NewHashSet()
 	if !sha1Works && !md5Works {
-		set.Add(hash.HashNone)
+		set.Add(hash.None)
 	}
 	if sha1Works {
-		set.Add(hash.HashSHA1)
+		set.Add(hash.SHA1)
 	}
 	if md5Works {
-		set.Add(hash.HashMD5)
+		set.Add(hash.MD5)
 	}
 	_ = session.Close()
 
@@ -707,9 +707,9 @@ func (o *Object) Remote() string {
 // Hash returns the selected checksum of the file
 // If no checksum is available it returns ""
 func (o *Object) Hash(r hash.Type) (string, error) {
-	if r == hash.HashMD5 && o.md5sum != nil {
+	if r == hash.MD5 && o.md5sum != nil {
 		return *o.md5sum, nil
-	} else if r == hash.HashSHA1 && o.sha1sum != nil {
+	} else if r == hash.SHA1 && o.sha1sum != nil {
 		return *o.sha1sum, nil
 	}
 
@@ -721,29 +721,29 @@ func (o *Object) Hash(r hash.Type) (string, error) {
 	o.fs.putSftpConnection(&c, err)
 	if err != nil {
 		o.fs.cachedHashes = nil // Something has changed on the remote system
-		return "", hash.ErrHashUnsupported
+		return "", hash.ErrUnsupported
 	}
 
-	err = hash.ErrHashUnsupported
+	err = hash.ErrUnsupported
 	var outputBytes []byte
 	escapedPath := shellEscape(o.path())
-	if r == hash.HashMD5 {
+	if r == hash.MD5 {
 		outputBytes, err = session.Output("md5sum " + escapedPath)
-	} else if r == hash.HashSHA1 {
+	} else if r == hash.SHA1 {
 		outputBytes, err = session.Output("sha1sum " + escapedPath)
 	}
 
 	if err != nil {
 		o.fs.cachedHashes = nil // Something has changed on the remote system
 		_ = session.Close()
-		return "", hash.ErrHashUnsupported
+		return "", hash.ErrUnsupported
 	}
 
 	_ = session.Close()
 	str := parseHash(outputBytes)
-	if r == hash.HashMD5 {
+	if r == hash.MD5 {
 		o.md5sum = &str
-	} else if r == hash.HashSHA1 {
+	} else if r == hash.SHA1 {
 		o.sha1sum = &str
 	}
 	return str, nil
diff --git a/backend/swift/swift.go b/backend/swift/swift.go
index 7f3037454..acd66df40 100644
--- a/backend/swift/swift.go
+++ b/backend/swift/swift.go
@@ -604,7 +604,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 
 // Hashes returns the supported hash sets.
 func (f *Fs) Hashes() hash.Set {
-	return hash.Set(hash.HashMD5)
+	return hash.Set(hash.MD5)
 }
 
 // ------------------------------------------------------------
@@ -629,8 +629,8 @@ func (o *Object) Remote() string {
 
 // Hash returns the Md5sum of an object returning a lowercase hex string
 func (o *Object) Hash(t hash.Type) (string, error) {
-	if t != hash.HashMD5 {
-		return "", hash.ErrHashUnsupported
+	if t != hash.MD5 {
+		return "", hash.ErrUnsupported
 	}
 	isDynamicLargeObject, err := o.isDynamicLargeObject()
 	if err != nil {
diff --git a/backend/webdav/webdav.go b/backend/webdav/webdav.go
index 462a6652e..8eef8760d 100644
--- a/backend/webdav/webdav.go
+++ b/backend/webdav/webdav.go
@@ -768,7 +768,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 
 // Hashes returns the supported hash sets.
 func (f *Fs) Hashes() hash.Set {
-	return hash.Set(hash.HashNone)
+	return hash.Set(hash.None)
 }
 
 // ------------------------------------------------------------
@@ -793,8 +793,8 @@ func (o *Object) Remote() string {
 
 // Hash returns the SHA-1 of an object returning a lowercase hex string
 func (o *Object) Hash(t hash.Type) (string, error) {
-	if t != hash.HashSHA1 {
-		return "", hash.ErrHashUnsupported
+	if t != hash.SHA1 {
+		return "", hash.ErrUnsupported
 	}
 	return o.sha1, nil
 }
diff --git a/backend/yandex/yandex.go b/backend/yandex/yandex.go
index 3b621c788..3aa9a2b72 100644
--- a/backend/yandex/yandex.go
+++ b/backend/yandex/yandex.go
@@ -491,7 +491,7 @@ func (f *Fs) CleanUp() error {
 
 // Hashes returns the supported hash sets.
 func (f *Fs) Hashes() hash.Set {
-	return hash.Set(hash.HashMD5)
+	return hash.Set(hash.MD5)
 }
 
 // ------------------------------------------------------------
@@ -516,8 +516,8 @@ func (o *Object) Remote() string {
 
 // Hash returns the Md5sum of an object returning a lowercase hex string
 func (o *Object) Hash(t hash.Type) (string, error) {
-	if t != hash.HashMD5 {
-		return "", hash.ErrHashUnsupported
+	if t != hash.MD5 {
+		return "", hash.ErrUnsupported
 	}
 	return o.md5sum, nil
 }
diff --git a/cmd/cryptcheck/cryptcheck.go b/cmd/cryptcheck/cryptcheck.go
index f678d7fac..02143bac2 100644
--- a/cmd/cryptcheck/cryptcheck.go
+++ b/cmd/cryptcheck/cryptcheck.go
@@ -60,7 +60,7 @@ func cryptCheck(fdst, fsrc fs.Fs) error {
 	// Find a hash to use
 	funderlying := fcrypt.UnWrap()
 	hashType := funderlying.Hashes().GetOne()
-	if hashType == hash.HashNone {
+	if hashType == hash.None {
 		return errors.Errorf("%s:%s does not support any hashes", funderlying.Name(), funderlying.Root())
 	}
 	fs.Infof(nil, "Using %v for hash comparisons", hashType)
diff --git a/cmd/lsf/lsf.go b/cmd/lsf/lsf.go
index 392075278..b040d3023 100644
--- a/cmd/lsf/lsf.go
+++ b/cmd/lsf/lsf.go
@@ -20,7 +20,7 @@ var (
 	separator string
 	dirSlash  bool
 	recurse   bool
-	hashType  = hash.HashMD5
+	hashType  = hash.MD5
 	filesOnly bool
 	dirsOnly  bool
 )
diff --git a/fs/hash/hash.go b/fs/hash/hash.go
index d415b18d8..c875f90f8 100644
--- a/fs/hash/hash.go
+++ b/fs/hash/hash.go
@@ -17,49 +17,49 @@ import (
 // Type indicates a standard hashing algorithm
 type Type int
 
-// ErrHashUnsupported should be returned by filesystem,
+// ErrUnsupported should be returned by filesystem,
 // if it is requested to deliver an unsupported hash type.
-var ErrHashUnsupported = errors.New("hash type not supported")
+var ErrUnsupported = errors.New("hash type not supported")
 
 const (
-	// HashMD5 indicates MD5 support
-	HashMD5 Type = 1 << iota
+	// MD5 indicates MD5 support
+	MD5 Type = 1 << iota
 
-	// HashSHA1 indicates SHA-1 support
-	HashSHA1
+	// SHA1 indicates SHA-1 support
+	SHA1
 
-	// HashDropbox indicates Dropbox special hash
+	// Dropbox indicates Dropbox special hash
 	// https://www.dropbox.com/developers/reference/content-hash
-	HashDropbox
+	Dropbox
 
-	// HashNone indicates no hashes are supported
-	HashNone Type = 0
+	// None indicates no hashes are supported
+	None Type = 0
 )
 
-// SupportedHashes returns a set of all the supported hashes by
+// Supported returns a set of all the supported hashes by
 // HashStream and MultiHasher.
-var SupportedHashes = NewHashSet(HashMD5, HashSHA1, HashDropbox)
+var Supported = NewHashSet(MD5, SHA1, Dropbox)
 
-// HashWidth returns the width in characters for any HashType
-var HashWidth = map[Type]int{
-	HashMD5:     32,
-	HashSHA1:    40,
-	HashDropbox: 64,
+// Width returns the width in characters for any HashType
+var Width = map[Type]int{
+	MD5:     32,
+	SHA1:    40,
+	Dropbox: 64,
 }
 
 // Stream will calculate hashes of all supported hash types.
 func Stream(r io.Reader) (map[Type]string, error) {
-	return StreamTypes(r, SupportedHashes)
+	return StreamTypes(r, Supported)
 }
 
 // StreamTypes will calculate hashes of the requested hash types.
 func StreamTypes(r io.Reader, set Set) (map[Type]string, error) {
-	hashers, err := hashFromTypes(set)
+	hashers, err := fromTypes(set)
 	if err != nil {
 		return nil, err
 	}
 
-	_, err = io.Copy(hashToMultiWriter(hashers), r)
+	_, err = io.Copy(toMultiWriter(hashers), r)
 	if err != nil {
 		return nil, err
 	}
@@ -74,13 +74,13 @@ func StreamTypes(r io.Reader, set Set) (map[Type]string, error) {
 // The function will panic if the hash type is unknown.
 func (h Type) String() string {
 	switch h {
-	case HashNone:
+	case None:
 		return "None"
-	case HashMD5:
+	case MD5:
 		return "MD5"
-	case HashSHA1:
+	case SHA1:
 		return "SHA-1"
-	case HashDropbox:
+	case Dropbox:
 		return "DropboxHash"
 	default:
 		err := fmt.Sprintf("internal error: unknown hash type: 0x%x", int(h))
@@ -88,17 +88,17 @@ func (h Type) String() string {
 	}
 }
 
-// Set a HashType from a flag
+// Set a Type from a flag
 func (h *Type) Set(s string) error {
 	switch s {
 	case "None":
-		*h = HashNone
+		*h = None
 	case "MD5":
-		*h = HashMD5
+		*h = MD5
 	case "SHA-1":
-		*h = HashSHA1
+		*h = SHA1
 	case "DropboxHash":
-		*h = HashDropbox
+		*h = Dropbox
 	default:
 		return errors.Errorf("Unknown hash type %q", s)
 	}
@@ -113,22 +113,22 @@ func (h Type) Type() string {
 // Check it satisfies the interface
 var _ pflag.Value = (*Type)(nil)
 
-// hashFromTypes will return hashers for all the requested types.
+// fromTypes will return hashers for all the requested types.
 // The types must be a subset of SupportedHashes,
 // and this function must support all types.
-func hashFromTypes(set Set) (map[Type]hash.Hash, error) {
-	if !set.SubsetOf(SupportedHashes) {
+func fromTypes(set Set) (map[Type]hash.Hash, error) {
+	if !set.SubsetOf(Supported) {
 		return nil, errors.Errorf("requested set %08x contains unknown hash types", int(set))
 	}
 	var hashers = make(map[Type]hash.Hash)
 	types := set.Array()
 	for _, t := range types {
 		switch t {
-		case HashMD5:
+		case MD5:
 			hashers[t] = md5.New()
-		case HashSHA1:
+		case SHA1:
 			hashers[t] = sha1.New()
-		case HashDropbox:
+		case Dropbox:
 			hashers[t] = dbhash.New()
 		default:
 			err := fmt.Sprintf("internal error: Unsupported hash type %v", t)
@@ -138,10 +138,10 @@ func hashFromTypes(set Set) (map[Type]hash.Hash, error) {
 	return hashers, nil
 }
 
-// hashToMultiWriter will return a set of hashers into a
+// toMultiWriter will return a set of hashers into a
 // single multiwriter, where one write will update all
 // the hashers.
-func hashToMultiWriter(h map[Type]hash.Hash) io.Writer {
+func toMultiWriter(h map[Type]hash.Hash) io.Writer {
 	// Convert to to slice
 	var w = make([]io.Writer, 0, len(h))
 	for _, v := range h {
@@ -161,7 +161,7 @@ type MultiHasher struct {
 // NewMultiHasher will return a hash writer that will write all
 // supported hash types.
 func NewMultiHasher() *MultiHasher {
-	h, err := NewMultiHasherTypes(SupportedHashes)
+	h, err := NewMultiHasherTypes(Supported)
 	if err != nil {
 		panic("internal error: could not create multihasher")
 	}
@@ -171,11 +171,11 @@ func NewMultiHasher() *MultiHasher {
 
 // NewMultiHasherTypes will return a hash writer that will write
 // the requested hash types.
 func NewMultiHasherTypes(set Set) (*MultiHasher, error) {
-	hashers, err := hashFromTypes(set)
+	hashers, err := fromTypes(set)
 	if err != nil {
 		return nil, err
 	}
-	m := MultiHasher{h: hashers, w: hashToMultiWriter(hashers)}
+	m := MultiHasher{h: hashers, w: toMultiWriter(hashers)}
 	return &m, nil
 }
 
@@ -205,7 +205,7 @@ type Set int
 
 // NewHashSet will create a new hash set with the hash types supplied
 func NewHashSet(t ...Type) Set {
-	h := Set(HashNone)
+	h := Set(None)
 	return h.Add(t...)
 }
 
@@ -247,7 +247,7 @@ func (h Set) GetOne() Type {
 		i++
 		v >>= 1
 	}
-	return Type(HashNone)
+	return Type(None)
 }
 
 // Array returns an array of all hash types in the set
diff --git a/fs/hash/hash_test.go b/fs/hash/hash_test.go
index ccf06200a..44ecfb3be 100644
--- a/fs/hash/hash_test.go
+++ b/fs/hash/hash_test.go
@@ -18,42 +18,42 @@ func TestHashSet(t *testing.T) {
 	a := h.Array()
 	assert.Len(t, a, 0)
 
-	h = h.Add(hash.HashMD5)
+	h = h.Add(hash.MD5)
 	assert.Equal(t, 1, h.Count())
-	assert.Equal(t, hash.HashMD5, h.GetOne())
+	assert.Equal(t, hash.MD5, h.GetOne())
 	a = h.Array()
 	assert.Len(t, a, 1)
-	assert.Equal(t, a[0], hash.HashMD5)
+	assert.Equal(t, a[0], hash.MD5)
 
 	// Test overlap, with all hashes
-	h = h.Overlap(hash.SupportedHashes)
+	h = h.Overlap(hash.Supported)
 	assert.Equal(t, 1, h.Count())
-	assert.Equal(t, hash.HashMD5, h.GetOne())
-	assert.True(t, h.SubsetOf(hash.SupportedHashes))
-	assert.True(t, h.SubsetOf(hash.NewHashSet(hash.HashMD5)))
+	assert.Equal(t, hash.MD5, h.GetOne())
+	assert.True(t, h.SubsetOf(hash.Supported))
+	assert.True(t, h.SubsetOf(hash.NewHashSet(hash.MD5)))
 
-	h = h.Add(hash.HashSHA1)
+	h = h.Add(hash.SHA1)
 	assert.Equal(t, 2, h.Count())
 	one := h.GetOne()
-	if !(one == hash.HashMD5 || one == hash.HashSHA1) {
+	if !(one == hash.MD5 || one == hash.SHA1) {
 		t.Fatalf("expected to be either MD5 or SHA1, got %v", one)
 	}
-	assert.True(t, h.SubsetOf(hash.SupportedHashes))
-	assert.False(t, h.SubsetOf(hash.NewHashSet(hash.HashMD5)))
-	assert.False(t, h.SubsetOf(hash.NewHashSet(hash.HashSHA1)))
-	assert.True(t, h.SubsetOf(hash.NewHashSet(hash.HashMD5, hash.HashSHA1)))
+	assert.True(t, h.SubsetOf(hash.Supported))
+	assert.False(t, h.SubsetOf(hash.NewHashSet(hash.MD5)))
+	assert.False(t, h.SubsetOf(hash.NewHashSet(hash.SHA1)))
+	assert.True(t, h.SubsetOf(hash.NewHashSet(hash.MD5, hash.SHA1)))
 	a = h.Array()
 	assert.Len(t, a, 2)
 
-	ol := h.Overlap(hash.NewHashSet(hash.HashMD5))
+	ol := h.Overlap(hash.NewHashSet(hash.MD5))
 	assert.Equal(t, 1, ol.Count())
-	assert.True(t, ol.Contains(hash.HashMD5))
-	assert.False(t, ol.Contains(hash.HashSHA1))
+	assert.True(t, ol.Contains(hash.MD5))
+	assert.False(t, ol.Contains(hash.SHA1))
 
-	ol = h.Overlap(hash.NewHashSet(hash.HashMD5, hash.HashSHA1))
+	ol = h.Overlap(hash.NewHashSet(hash.MD5, hash.SHA1))
 	assert.Equal(t, 2, ol.Count())
-	assert.True(t, ol.Contains(hash.HashMD5))
-	assert.True(t, ol.Contains(hash.HashSHA1))
+	assert.True(t, ol.Contains(hash.MD5))
+	assert.True(t, ol.Contains(hash.SHA1))
 }
 
 type hashTest struct {
@@ -65,18 +65,18 @@ var hashTestSet = []hashTest{
 	{
 		input: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14},
 		output: map[hash.Type]string{
-			hash.HashMD5: "bf13fc19e5151ac57d4252e0e0f87abe",
-			hash.HashSHA1: "3ab6543c08a75f292a5ecedac87ec41642d12166",
-			hash.HashDropbox: "214d2fcf3566e94c99ad2f59bd993daca46d8521a0c447adf4b324f53fddc0c7",
+			hash.MD5: "bf13fc19e5151ac57d4252e0e0f87abe",
+			hash.SHA1: "3ab6543c08a75f292a5ecedac87ec41642d12166",
+			hash.Dropbox: "214d2fcf3566e94c99ad2f59bd993daca46d8521a0c447adf4b324f53fddc0c7",
 		},
 	},
 	// Empty data set
 	{
 		input: []byte{},
 		output: map[hash.Type]string{
-			hash.HashMD5: "d41d8cd98f00b204e9800998ecf8427e",
-			hash.HashSHA1: "da39a3ee5e6b4b0d3255bfef95601890afd80709",
-			hash.HashDropbox: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+			hash.MD5: "d41d8cd98f00b204e9800998ecf8427e",
+			hash.SHA1: "da39a3ee5e6b4b0d3255bfef95601890afd80709",
+			hash.Dropbox: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
 		},
 	},
 }
@@ -103,7 +103,7 @@ func TestMultiHasher(t *testing.T) {
 }
 
 func TestMultiHasherTypes(t *testing.T) {
-	h := hash.HashSHA1
+	h := hash.SHA1
 	for _, test := range hashTestSet {
 		mh, err := hash.NewMultiHasherTypes(hash.NewHashSet(h))
 		if err != nil {
@@ -137,7 +137,7 @@ func TestHashStream(t *testing.T) {
 }
 
 func TestHashStreamTypes(t *testing.T) {
-	h := hash.HashSHA1
+	h := hash.SHA1
 	for _, test := range hashTestSet {
 		sums, err := hash.StreamTypes(bytes.NewBuffer(test.input), hash.NewHashSet(h))
 		require.NoError(t, err)
@@ -147,17 +147,17 @@ func TestHashStreamTypes(t *testing.T) {
 }
 
 func TestHashSetStringer(t *testing.T) {
-	h := hash.NewHashSet(hash.HashSHA1, hash.HashMD5, hash.HashDropbox)
+	h := hash.NewHashSet(hash.SHA1, hash.MD5, hash.Dropbox)
 	assert.Equal(t, h.String(), "[MD5, SHA-1, DropboxHash]")
-	h = hash.NewHashSet(hash.HashSHA1)
+	h = hash.NewHashSet(hash.SHA1)
 	assert.Equal(t, h.String(), "[SHA-1]")
 	h = hash.NewHashSet()
 	assert.Equal(t, h.String(), "[]")
 }
 
 func TestHashStringer(t *testing.T) {
-	h := hash.HashMD5
+	h := hash.MD5
 	assert.Equal(t, h.String(), "MD5")
-	h = hash.HashNone
+	h = hash.None
 	assert.Equal(t, h.String(), "None")
 }
diff --git a/fs/object/object.go b/fs/object/object.go
index c4ac9b7fd..e11827b9b 100644
--- a/fs/object/object.go
+++ b/fs/object/object.go
@@ -51,12 +51,12 @@ func (i *staticObjectInfo) Size() int64 { return i.size }
 func (i *staticObjectInfo) Storable() bool { return i.storable }
 func (i *staticObjectInfo) Hash(h hash.Type) (string, error) {
 	if len(i.hashes) == 0 {
-		return "", hash.ErrHashUnsupported
+		return "", hash.ErrUnsupported
 	}
 	if hash, ok := i.hashes[h]; ok {
 		return hash, nil
 	}
-	return "", hash.ErrHashUnsupported
+	return "", hash.ErrUnsupported
 }
 
 // MemoryFs is an in memory Fs, it only supports FsInfo and Put
@@ -78,7 +78,7 @@ func (memoryFs) String() string { return "memory" }
 func (memoryFs) Precision() time.Duration { return time.Nanosecond }
 
 // Returns the supported hash types of the filesystem
-func (memoryFs) Hashes() hash.Set { return hash.SupportedHashes }
+func (memoryFs) Hashes() hash.Set { return hash.Supported }
 
 // Features returns the optional features of this Fs
 func (memoryFs) Features() *fs.Features { return &fs.Features{} }
diff --git a/fs/object/object_test.go b/fs/object/object_test.go
index 56e1480f2..b7e001f94 100644
--- a/fs/object/object_test.go
+++ b/fs/object/object_test.go
@@ -27,23 +27,23 @@ func TestStaticObject(t *testing.T) {
 	assert.Equal(t, size, o.Size())
 	assert.Equal(t, true, o.Storable())
 
-	Hash, err := o.Hash(hash.HashMD5)
+	Hash, err := o.Hash(hash.MD5)
 	assert.NoError(t, err)
 	assert.Equal(t, "", Hash)
 
 	o = object.NewStaticObjectInfo(remote, now, size, true, nil, nil)
-	_, err = o.Hash(hash.HashMD5)
-	assert.Equal(t, hash.ErrHashUnsupported, err)
+	_, err = o.Hash(hash.MD5)
+	assert.Equal(t, hash.ErrUnsupported, err)
 
 	hs := map[hash.Type]string{
-		hash.HashMD5: "potato",
+		hash.MD5: "potato",
 	}
 	o = object.NewStaticObjectInfo(remote, now, size, true, hs, nil)
-	Hash, err = o.Hash(hash.HashMD5)
+	Hash, err = o.Hash(hash.MD5)
 	assert.NoError(t, err)
 	assert.Equal(t, "potato", Hash)
-	_, err = o.Hash(hash.HashSHA1)
-	assert.Equal(t, hash.ErrHashUnsupported, err)
+	_, err = o.Hash(hash.SHA1)
+	assert.Equal(t, hash.ErrUnsupported, err)
 }
 
 func TestMemoryFs(t *testing.T) {
@@ -52,7 +52,7 @@ func TestMemoryFs(t *testing.T) {
 	assert.Equal(t, "", f.Root())
 	assert.Equal(t, "memory", f.String())
 	assert.Equal(t, time.Nanosecond, f.Precision())
-	assert.Equal(t, hash.SupportedHashes, f.Hashes())
+	assert.Equal(t, hash.Supported, f.Hashes())
 	assert.Equal(t, &fs.Features{}, f.Features())
 
 	entries, err := f.List("")
@@ -68,7 +68,7 @@ func TestMemoryFs(t *testing.T) {
 	src := object.NewStaticObjectInfo("remote", now, int64(buf.Len()), true, nil, nil)
 	o, err = f.Put(buf, src)
 	assert.NoError(t, err)
-	hash, err := o.Hash(hash.HashSHA1)
+	hash, err := o.Hash(hash.SHA1)
 	assert.NoError(t, err)
 	assert.Equal(t, "3e2e95f5ad970eadfa7e17eaf73da97024aa5359", hash)
 
@@ -95,11 +95,11 @@ func TestMemoryObject(t *testing.T) {
 	assert.Equal(t, int64(len(content)), o.Size())
 	assert.Equal(t, true, o.Storable())
 
-	Hash, err := o.Hash(hash.HashMD5)
+	Hash, err := o.Hash(hash.MD5)
 	assert.NoError(t, err)
 	assert.Equal(t, "8ee2027983915ec78acc45027d874316", Hash)
 
-	Hash, err = o.Hash(hash.HashSHA1)
+	Hash, err = o.Hash(hash.SHA1)
 	assert.NoError(t, err)
 	assert.Equal(t, "3e2e95f5ad970eadfa7e17eaf73da97024aa5359", Hash)
 
diff --git a/fs/operations/operations.go b/fs/operations/operations.go
index b524b0b68..ea91b3d52 100644
--- a/fs/operations/operations.go
+++ b/fs/operations/operations.go
@@ -46,7 +46,7 @@ func CheckHashes(src fs.ObjectInfo, dst fs.Object) (equal bool, ht hash.Type, er
 	common := src.Fs().Hashes().Overlap(dst.Fs().Hashes())
 	// fs.Debugf(nil, "Shared hashes: %v", common)
 	if common.Count() == 0 {
-		return true, hash.HashNone, nil
+		return true, hash.None, nil
 	}
 	ht = common.GetOne()
 	srcHash, err := src.Hash(ht)
@@ -56,7 +56,7 @@ func CheckHashes(src fs.ObjectInfo, dst fs.Object) (equal bool, ht hash.Type, er
 		return false, ht, err
 	}
 	if srcHash == "" {
-		return true, hash.HashNone, nil
+		return true, hash.None, nil
 	}
 	dstHash, err := dst.Hash(ht)
 	if err != nil {
@@ -65,7 +65,7 @@ func CheckHashes(src fs.ObjectInfo, dst fs.Object) (equal bool, ht hash.Type, er
 		return false, ht, err
 	}
 	if dstHash == "" {
-		return true, hash.HashNone, nil
+		return true, hash.None, nil
 	}
 	if srcHash != dstHash {
 		fs.Debugf(src, "%v = %s (%v)", ht, srcHash, src.Fs())
@@ -118,7 +118,7 @@ func equal(src fs.ObjectInfo, dst fs.Object, sizeOnly, checkSum bool) bool {
 			fs.Debugf(src, "%v differ", ht)
 			return false
 		}
-		if ht == hash.HashNone {
+		if ht == hash.None {
 			fs.Debugf(src, "Size of src and dst objects identical")
 		} else {
 			fs.Debugf(src, "Size and %v of src and dst objects identical", ht)
@@ -148,7 +148,7 @@ func equal(src fs.ObjectInfo, dst fs.Object, sizeOnly, checkSum bool) bool {
 			fs.Debugf(src, "%v differ", ht)
 			return false
 		}
-		if ht == hash.HashNone {
+		if ht == hash.None {
 			// if couldn't check hash, return that they differ
 			return false
 		}
@@ -242,7 +242,7 @@ func Copy(f fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Objec
 	doUpdate := dst != nil
 	// work out which hash to use - limit to 1 hash in common
 	var common hash.Set
-	hashType := hash.HashNone
+	hashType := hash.None
 	if !fs.Config.SizeOnly {
 		common = src.Fs().Hashes().Overlap(f.Hashes())
 		if common.Count() > 0 {
@@ -321,7 +321,7 @@ func Copy(f fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Objec
 	// Verify hashes are the same after transfer - ignoring blank hashes
 	// TODO(klauspost): This could be extended, so we always create a hash type matching
 	// the destination, and calculate it while sending.
-	if hashType != hash.HashNone {
+	if hashType != hash.None {
 		var srcSum string
 		srcSum, err = src.Hash(hashType)
 		if err != nil {
@@ -533,7 +533,7 @@ func checkIdentical(dst, src fs.Object) (differ bool, noHash bool) {
 		// CheckHashes will log and count errors
 		return true, false
 	}
-	if ht == hash.HashNone {
+	if ht == hash.None {
 		return false, true
 	}
 	if !same {
@@ -817,7 +817,7 @@ func ListLong(f fs.Fs, w io.Writer) error {
 //
 // Lists in parallel which may get them out of order
 func Md5sum(f fs.Fs, w io.Writer) error {
-	return hashLister(hash.HashMD5, f, w)
+	return hashLister(hash.MD5, f, w)
 }
 
 // Sha1sum list the Fs to the supplied writer
@@ -826,7 +826,7 @@ func Md5sum(f fs.Fs, w io.Writer) error {
 //
 // Lists in parallel which may get them out of order
 func Sha1sum(f fs.Fs, w io.Writer) error {
-	return hashLister(hash.HashSHA1, f, w)
+	return hashLister(hash.SHA1, f, w)
 }
 
 // DropboxHashSum list the Fs to the supplied writer
@@ -835,7 +835,7 @@ func Sha1sum(f fs.Fs, w io.Writer) error {
 //
 // Lists in parallel which may get them out of order
 func DropboxHashSum(f fs.Fs, w io.Writer) error {
-	return hashLister(hash.HashDropbox, f, w)
+	return hashLister(hash.Dropbox, f, w)
 }
 
 // hashSum returns the human readable hash for ht passed in. This may
@@ -844,7 +844,7 @@ func hashSum(ht hash.Type, o fs.Object) string {
 	accounting.Stats.Checking(o.Remote())
 	sum, err := o.Hash(ht)
 	accounting.Stats.DoneChecking(o.Remote())
-	if err == hash.ErrHashUnsupported {
+	if err == hash.ErrUnsupported {
 		sum = "UNSUPPORTED"
 	} else if err != nil {
 		fs.Debugf(o, "Failed to read %v: %v", ht, err)
@@ -856,7 +856,7 @@ func hashSum(ht hash.Type, o fs.Object) string {
 func hashLister(ht hash.Type, f fs.Fs, w io.Writer) error {
 	return ListFn(f, func(o fs.Object) {
 		sum := hashSum(ht, o)
-		syncFprintf(w, "%*s %s\n", hash.HashWidth[ht], sum, o.Remote())
+		syncFprintf(w, "%*s %s\n", hash.Width[ht], sum, o.Remote())
 	})
 }
 
@@ -1022,7 +1022,7 @@ func dedupeDeleteIdentical(remote string, objs []fs.Object) []fs.Object {
 	// See how many of these duplicates are identical
 	byHash := make(map[string][]fs.Object, len(objs))
 	for _, o := range objs {
-		md5sum, err := o.Hash(hash.HashMD5)
+		md5sum, err := o.Hash(hash.MD5)
 		if err == nil {
 			byHash[md5sum] = append(byHash[md5sum], o)
 		}
@@ -1047,7 +1047,7 @@ func dedupeDeleteIdentical(remote string, objs []fs.Object) []fs.Object {
 func dedupeInteractive(remote string, objs []fs.Object) {
 	fmt.Printf("%s: %d duplicates remain\n", remote, len(objs))
 	for i, o := range objs {
-		md5sum, err := o.Hash(hash.HashMD5)
+		md5sum, err := o.Hash(hash.MD5)
 		if err != nil {
 			md5sum = err.Error()
 		}
diff --git a/fs/operations/operations_test.go b/fs/operations/operations_test.go
index b71c81220..1e3e38f0b 100644
--- a/fs/operations/operations_test.go
+++ b/fs/operations/operations_test.go
@@ -308,7 +308,7 @@ func skipIfCantDedupe(t *testing.T, f fs.Fs) {
 	if !f.Features().DuplicateFiles {
 		t.Skip("Can't test deduplicate - no duplicate files possible")
 	}
-	if !f.Hashes().Contains(hash.HashMD5) {
+	if !f.Hashes().Contains(hash.MD5) {
 		t.Skip("Can't test deduplicate - MD5 not supported")
 	}
 }
@@ -892,9 +892,9 @@ func TestListFormat(t *testing.T) {
 		ht   hash.Type
 		want string
 	}{
-		{hash.HashMD5, "0cc175b9c0f1b6a831c399e269772661"},
-		{hash.HashSHA1, "86f7e437faa5a7fce15d1ddcb9eaeaea377667b8"},
-		{hash.HashDropbox, "bf5d3affb73efd2ec6c36ad3112dd933efed63c4e1cbffcfa88e2759c144f2d8"},
+		{hash.MD5, "0cc175b9c0f1b6a831c399e269772661"},
+		{hash.SHA1, "86f7e437faa5a7fce15d1ddcb9eaeaea377667b8"},
+		{hash.Dropbox, "bf5d3affb73efd2ec6c36ad3112dd933efed63c4e1cbffcfa88e2759c144f2d8"},
 	} {
 		list.SetOutput(nil)
 		list.AddHash(test.ht)
diff --git a/fs/sync/sync.go b/fs/sync/sync.go
index c91f6f701..2409c69f8 100644
--- a/fs/sync/sync.go
+++ b/fs/sync/sync.go
@@ -87,7 +87,7 @@ func newSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, de
 		fs.Errorf(fdst, "Ignoring --track-renames as the destination does not support server-side move or copy")
 		s.trackRenames = false
 	}
-	if s.commonHash == hash.HashNone {
+	if s.commonHash == hash.None {
 		fs.Errorf(fdst, "Ignoring --track-renames as the source and destination do not have a common hash")
 		s.trackRenames = false
 	}
diff --git a/fs/sync/sync_test.go b/fs/sync/sync_test.go
index f0d007018..687bd67e8 100644
--- a/fs/sync/sync_test.go
+++ b/fs/sync/sync_test.go
@@ -751,7 +751,7 @@ func TestSyncWithTrackRenames(t *testing.T) {
 	}()
 
-	haveHash := r.Fremote.Hashes().Overlap(r.Flocal.Hashes()).GetOne() != hash.HashNone
+	haveHash := r.Fremote.Hashes().Overlap(r.Flocal.Hashes()).GetOne() != hash.None
 	canTrackRenames := haveHash && operations.CanServerSideMove(r.Fremote)
 	t.Logf("Can track renames: %v", canTrackRenames)