Convert to using github.com/pkg/errors everywhere

This commit is contained in:
Nick Craig-Wood 2016-06-12 15:06:02 +01:00
parent 7fe653c350
commit 4c5b2833b3
32 changed files with 187 additions and 161 deletions

View File

@ -26,6 +26,7 @@ import (
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/oauthutil"
"github.com/ncw/rclone/pacer"
"github.com/pkg/errors"
"github.com/spf13/pflag"
"golang.org/x/oauth2"
)
@ -185,7 +186,7 @@ func NewFs(name, root string) (fs.Fs, error) {
return f.shouldRetry(resp, err)
})
if err != nil {
return nil, fmt.Errorf("Failed to get endpoints: %v", err)
return nil, errors.Wrap(err, "failed to get endpoints")
}
// Get rootID
@ -195,7 +196,7 @@ func NewFs(name, root string) (fs.Fs, error) {
return f.shouldRetry(resp, err)
})
if err != nil || rootInfo.Id == nil {
return nil, fmt.Errorf("Failed to get root: %v", err)
return nil, errors.Wrap(err, "failed to get root")
}
f.dirCache = dircache.New(root, *rootInfo.Id, f)
@ -458,7 +459,7 @@ func (f *Fs) Mkdir() error {
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(check bool) error {
if f.root == "" {
return fmt.Errorf("Can't purge root directory")
return errors.New("can't purge root directory")
}
dc := f.dirCache
err := dc.FindRoot(false)
@ -487,7 +488,7 @@ func (f *Fs) purgeCheck(check bool) error {
return err
}
if !empty {
return fmt.Errorf("Directory not empty")
return errors.New("directory not empty")
}
}

View File

@ -10,7 +10,6 @@ package b2
import (
"bytes"
"crypto/sha1"
"errors"
"fmt"
"hash"
"io"
@ -28,6 +27,7 @@ import (
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/pacer"
"github.com/ncw/rclone/rest"
"github.com/pkg/errors"
)
const (
@ -119,7 +119,7 @@ var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
func parsePath(path string) (bucket, directory string, err error) {
parts := matcher.FindStringSubmatch(path)
if parts == nil {
err = fmt.Errorf("Couldn't find bucket in b2 path %q", path)
err = errors.Errorf("couldn't find bucket in b2 path %q", path)
} else {
bucket, directory = parts[1], parts[2]
directory = strings.Trim(directory, "/")
@ -207,7 +207,7 @@ func NewFs(name, root string) (fs.Fs, error) {
}
err = f.authorizeAccount()
if err != nil {
return nil, fmt.Errorf("Failed to authorize account: %v", err)
return nil, errors.Wrap(err, "failed to authorize account")
}
if f.root != "" {
f.root += "/"
@ -247,7 +247,7 @@ func (f *Fs) authorizeAccount() error {
return f.shouldRetryNoReauth(resp, err)
})
if err != nil {
return fmt.Errorf("Failed to authenticate: %v", err)
return errors.Wrap(err, "failed to authenticate")
}
f.srv.SetRoot(f.info.APIURL+"/b2api/v1").SetHeader("Authorization", f.info.AuthorizationToken)
return nil
@ -276,7 +276,7 @@ func (f *Fs) getUploadURL() (upload *api.GetUploadURLResponse, err error) {
return f.shouldRetryNoReauth(resp, err)
})
if err != nil {
return nil, fmt.Errorf("Failed to get upload URL: %v", err)
return nil, errors.Wrap(err, "failed to get upload URL")
}
} else {
upload, f.uploads = f.uploads[0], f.uploads[1:]
@ -562,7 +562,7 @@ func (f *Fs) getBucketID() (bucketID string, err error) {
})
if bucketID == "" {
err = fs.ErrorDirNotFound //fmt.Errorf("Couldn't find bucket %q", f.bucket)
err = fs.ErrorDirNotFound
}
f._bucketID = bucketID
return bucketID, err
@ -618,7 +618,7 @@ func (f *Fs) Mkdir() error {
return nil
}
}
return fmt.Errorf("Failed to create bucket: %v", err)
return errors.Wrap(err, "failed to create bucket")
}
f.setBucketID(response.ID)
return nil
@ -649,7 +649,7 @@ func (f *Fs) Rmdir() error {
return f.shouldRetry(resp, err)
})
if err != nil {
return fmt.Errorf("Failed to delete bucket: %v", err)
return errors.Wrap(err, "failed to delete bucket")
}
f.clearBucketID()
f.clearUploadURL()
@ -677,7 +677,7 @@ func (f *Fs) deleteByID(ID, Name string) error {
return f.shouldRetry(resp, err)
})
if err != nil {
return fmt.Errorf("Failed to delete %q: %v", Name, err)
return errors.Wrapf(err, "failed to delete %q", Name)
}
return nil
}
@ -811,7 +811,7 @@ func (o *Object) readMetaData() (err error) {
return err
}
if info == nil {
return fmt.Errorf("Object %q not found", o.remote)
return errors.Errorf("object %q not found", o.remote)
}
return o.decodeMetaData(info)
}
@ -905,14 +905,14 @@ func (file *openFile) Close() (err error) {
// Check to see we read the correct number of bytes
if file.o.Size() != file.bytes {
return fmt.Errorf("Object corrupted on transfer - length mismatch (want %d got %d)", file.o.Size(), file.bytes)
return errors.Errorf("object corrupted on transfer - length mismatch (want %d got %d)", file.o.Size(), file.bytes)
}
// Check the SHA1
receivedSHA1 := file.resp.Header.Get(sha1Header)
calculatedSHA1 := fmt.Sprintf("%x", file.hash.Sum(nil))
if receivedSHA1 != calculatedSHA1 {
return fmt.Errorf("Object corrupted on transfer - SHA1 mismatch (want %q got %q)", receivedSHA1, calculatedSHA1)
return errors.Errorf("object corrupted on transfer - SHA1 mismatch (want %q got %q)", receivedSHA1, calculatedSHA1)
}
return nil
@ -934,7 +934,7 @@ func (o *Object) Open() (in io.ReadCloser, err error) {
return o.fs.shouldRetry(resp, err)
})
if err != nil {
return nil, fmt.Errorf("Failed to open for download: %v", err)
return nil, errors.Wrap(err, "failed to open for download")
}
// Parse the time out of the headers if possible
@ -1015,7 +1015,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo) (err error) {
return err
}
if n != size {
return fmt.Errorf("Read %d bytes expecting %d", n, size)
return errors.Errorf("read %d bytes expecting %d", n, size)
}
calculatedSha1 = fmt.Sprintf("%x", hash.Sum(nil))
@ -1139,7 +1139,7 @@ func (o *Object) Remove() error {
return o.fs.shouldRetry(resp, err)
})
if err != nil {
return fmt.Errorf("Failed to delete file: %v", err)
return errors.Wrap(err, "failed to delete file")
}
return nil
}

View File

@ -4,12 +4,12 @@ package dircache
// _methods are called without the lock
import (
"fmt"
"log"
"strings"
"sync"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
)
// DirCache caches paths to directory IDs and vice versa
@ -159,7 +159,7 @@ func (dc *DirCache) _findDir(path string, create bool) (pathID string, err error
if create {
pathID, err = dc.fs.CreateDir(parentPathID, leaf)
if err != nil {
return "", fmt.Errorf("Failed to make directory: %v", err)
return "", errors.Wrap(err, "failed to make directory")
}
} else {
return "", fs.ErrorDirNotFound
@ -240,13 +240,13 @@ func (dc *DirCache) RootParentID() (string, error) {
dc.mu.Lock()
defer dc.mu.Unlock()
if !dc.foundRoot {
return "", fmt.Errorf("Internal Error: RootID() called before FindRoot")
return "", errors.New("internal error: RootID() called before FindRoot")
}
if dc.rootParentID == "" {
return "", fmt.Errorf("Internal Error: Didn't find rootParentID")
return "", errors.New("internal error: didn't find rootParentID")
}
if dc.rootID == dc.trueRootID {
return "", fmt.Errorf("Is root directory")
return "", errors.New("is root directory")
}
return dc.rootParentID, nil
}

View File

@ -24,6 +24,7 @@ import (
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/oauthutil"
"github.com/ncw/rclone/pacer"
"github.com/pkg/errors"
"github.com/spf13/pflag"
)
@ -220,7 +221,7 @@ OUTER:
return shouldRetry(err)
})
if err != nil {
return false, fmt.Errorf("Couldn't list directory: %s", err)
return false, errors.Wrap(err, "couldn't list directory")
}
for _, item := range files.Items {
if fn(item) {
@ -253,7 +254,7 @@ func (f *Fs) parseExtensions(extensions string) error {
for _, extension := range strings.Split(extensions, ",") {
extension = strings.ToLower(strings.TrimSpace(extension))
if _, found := extensionToMimeType[extension]; !found {
return fmt.Errorf("Couldn't find mime type for extension %q", extension)
return errors.Errorf("couldn't find mime type for extension %q", extension)
}
found := false
for _, existingExtension := range f.extensions {
@ -272,10 +273,10 @@ func (f *Fs) parseExtensions(extensions string) error {
// NewFs constructs an Fs from the path, container:path
func NewFs(name, path string) (fs.Fs, error) {
if !isPowerOfTwo(int64(chunkSize)) {
return nil, fmt.Errorf("drive: chunk size %v isn't a power of two", chunkSize)
return nil, errors.Errorf("drive: chunk size %v isn't a power of two", chunkSize)
}
if chunkSize < 256*1024 {
return nil, fmt.Errorf("drive: chunk size can't be less than 256k - was %v", chunkSize)
return nil, errors.Errorf("drive: chunk size can't be less than 256k - was %v", chunkSize)
}
oAuthClient, _, err := oauthutil.NewClient(name, driveConfig)
@ -298,7 +299,7 @@ func NewFs(name, path string) (fs.Fs, error) {
f.client = oAuthClient
f.svc, err = drive.New(f.client)
if err != nil {
return nil, fmt.Errorf("Couldn't create Drive client: %s", err)
return nil, errors.Wrap(err, "couldn't create Drive client")
}
// Read About so we know the root path
@ -307,7 +308,7 @@ func NewFs(name, path string) (fs.Fs, error) {
return shouldRetry(err)
})
if err != nil {
return nil, fmt.Errorf("Couldn't read info about Drive: %s", err)
return nil, errors.Wrap(err, "couldn't read info about Drive")
}
f.dirCache = dircache.New(root, f.about.RootFolderId, f)
@ -602,7 +603,7 @@ func (f *Fs) Rmdir() error {
return err
}
if len(children.Items) > 0 {
return fmt.Errorf("Directory not empty: %#v", children.Items)
return errors.Errorf("directory not empty: %#v", children.Items)
}
// Delete the directory if it isn't the root
if f.root != "" {
@ -643,7 +644,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
return nil, fs.ErrorCantCopy
}
if srcObj.isDocument {
return nil, fmt.Errorf("Can't copy a Google document")
return nil, errors.New("can't copy a Google document")
}
o, createInfo, err := f.createFileInfo(remote, srcObj.ModTime(), srcObj.bytes)
@ -671,7 +672,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
// result of List()
func (f *Fs) Purge() error {
if f.root == "" {
return fmt.Errorf("Can't purge root directory")
return errors.New("can't purge root directory")
}
err := f.dirCache.FindRoot(false)
if err != nil {
@ -708,7 +709,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
return nil, fs.ErrorCantMove
}
if srcObj.isDocument {
return nil, fmt.Errorf("Can't move a Google document")
return nil, errors.New("can't move a Google document")
}
// Temporary FsObject under construction
@ -857,7 +858,7 @@ func (o *Object) readMetaData() (err error) {
}
if !found {
fs.Debug(o, "Couldn't find object")
return fmt.Errorf("Couldn't find object")
return errors.New("couldn't find object")
}
return nil
}
@ -914,7 +915,7 @@ func (o *Object) Storable() bool {
// using the method passed in
func (o *Object) httpResponse(method string) (res *http.Response, err error) {
if o.url == "" {
return nil, fmt.Errorf("Forbidden to download - check sharing permission")
return nil, errors.New("forbidden to download - check sharing permission")
}
req, err := http.NewRequest(method, o.url, nil)
if err != nil {
@ -970,7 +971,7 @@ func (o *Object) Open() (in io.ReadCloser, err error) {
}
if res.StatusCode != 200 {
_ = res.Body.Close() // ignore error
return nil, fmt.Errorf("Bad response: %d: %s", res.StatusCode, res.Status)
return nil, errors.Errorf("bad response: %d: %s", res.StatusCode, res.Status)
}
// If it is a document, update the size with what we are
// reading as it can change from the HEAD in the listing to
@ -991,7 +992,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo) error {
size := src.Size()
modTime := src.ModTime()
if o.isDocument {
return fmt.Errorf("Can't update a google document")
return errors.New("can't update a google document")
}
updateInfo := &drive.File{
Id: o.id,
@ -1025,7 +1026,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo) error {
// Remove an object
func (o *Object) Remove() error {
if o.isDocument {
return fmt.Errorf("Can't delete a google document")
return errors.New("can't delete a google document")
}
var err error
err = o.fs.pacer.Call(func() (bool, error) {

View File

@ -1,9 +1,9 @@
package drive
import (
"fmt"
"testing"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"google.golang.org/api/drive/v2"
)
@ -17,11 +17,15 @@ func TestInternalParseExtensions(t *testing.T) {
{"doc", []string{"doc"}, nil},
{" docx ,XLSX, pptx,svg", []string{"docx", "xlsx", "pptx", "svg"}, nil},
{"docx,svg,Docx", []string{"docx", "svg"}, nil},
{"docx,potato,docx", []string{"docx"}, fmt.Errorf(`Couldn't find mime type for extension "potato"`)},
{"docx,potato,docx", []string{"docx"}, errors.New(`couldn't find mime type for extension "potato"`)},
} {
f := new(Fs)
gotErr := f.parseExtensions(test.in)
assert.Equal(t, test.wantErr, gotErr)
if test.wantErr == nil {
assert.NoError(t, gotErr)
} else {
assert.EqualError(t, gotErr, test.wantErr.Error())
}
assert.Equal(t, test.want, f.extensions)
}

View File

@ -21,6 +21,7 @@ import (
"strconv"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
"google.golang.org/api/drive/v2"
"google.golang.org/api/googleapi"
)
@ -138,7 +139,7 @@ func (rx *resumableUpload) transferStatus() (start int64, err error) {
if err != nil {
return 0, err
}
return 0, fmt.Errorf("unexpected http return code %v", res.StatusCode)
return 0, errors.Errorf("unexpected http return code %v", res.StatusCode)
}
Range := res.Header.Get("Range")
if m := rangeRE.FindStringSubmatch(Range); len(m) == 2 {
@ -147,7 +148,7 @@ func (rx *resumableUpload) transferStatus() (start int64, err error) {
return start, nil
}
}
return 0, fmt.Errorf("unable to parse range %q", Range)
return 0, errors.Errorf("unable to parse range %q", Range)
}
// Transfer a chunk - caller must call googleapi.CloseBody(res) if err == nil || res != nil

View File

@ -9,7 +9,6 @@ File system is case insensitive
import (
"crypto/md5"
"errors"
"fmt"
"io"
"io/ioutil"
@ -21,6 +20,7 @@ import (
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/oauthutil"
"github.com/pkg/errors"
"github.com/spf13/pflag"
"github.com/stacktic/dropbox"
)
@ -148,7 +148,7 @@ func newDropbox(name string) (*dropbox.Dropbox, error) {
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string) (fs.Fs, error) {
if uploadChunkSize > maxUploadChunkSize {
return nil, fmt.Errorf("Chunk size too big, must be < %v", maxUploadChunkSize)
return nil, errors.Errorf("chunk size too big, must be < %v", maxUploadChunkSize)
}
db, err := newDropbox(name)
if err != nil {
@ -239,7 +239,7 @@ func strip(path, root string) (string, error) {
}
lowercase := strings.ToLower(path)
if !strings.HasPrefix(lowercase, root) {
return "", fmt.Errorf("Path %q is not under root %q", path, root)
return "", errors.Errorf("path %q is not under root %q", path, root)
}
return path[len(root):], nil
}
@ -267,11 +267,11 @@ func (f *Fs) list(out fs.ListOpts, dir string) {
for {
deltaPage, err := f.db.Delta(cursor, root)
if err != nil {
out.SetError(fmt.Errorf("Couldn't list: %s", err))
out.SetError(errors.Wrap(err, "couldn't list"))
return
}
if deltaPage.Reset && cursor != "" {
err = errors.New("Unexpected reset during listing")
err = errors.New("unexpected reset during listing")
out.SetError(err)
break
}
@ -368,7 +368,7 @@ func (f *Fs) listOneLevel(out fs.ListOpts, dir string) {
}
entry, err := f.db.Metadata(root, true, false, "", "", metadataLimit)
if err != nil {
out.SetError(fmt.Errorf("Couldn't list single level: %s", err))
out.SetError(errors.Wrap(err, "couldn't list single level"))
return
}
for i := range entry.Contents {
@ -448,7 +448,7 @@ func (f *Fs) Mkdir() error {
if entry.IsDir {
return nil
}
return fmt.Errorf("%q already exists as file", f.root)
return errors.Errorf("%q already exists as file", f.root)
}
_, err = f.db.CreateFolder(f.slashRoot)
return err
@ -463,7 +463,7 @@ func (f *Fs) Rmdir() error {
return err
}
if len(entry.Contents) != 0 {
return errors.New("Directory not empty")
return errors.New("directory not empty")
}
return f.Purge()
}
@ -499,7 +499,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
dstPath := dstObj.remotePath()
entry, err := f.db.Copy(srcPath, dstPath, false)
if err != nil {
return nil, fmt.Errorf("Copy failed: %s", err)
return nil, errors.Wrap(err, "copy failed")
}
dstObj.setMetadataFromEntry(entry)
return dstObj, nil
@ -542,7 +542,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
dstPath := dstObj.remotePath()
entry, err := f.db.Move(srcPath, dstPath)
if err != nil {
return nil, fmt.Errorf("Move failed: %s", err)
return nil, errors.Wrap(err, "move failed")
}
dstObj.setMetadataFromEntry(entry)
return dstObj, nil
@ -571,7 +571,7 @@ func (f *Fs) DirMove(src fs.Fs) error {
// Do the move
_, err = f.db.Move(srcFs.slashRoot, f.slashRoot)
if err != nil {
return fmt.Errorf("MoveDir failed: %v", err)
return errors.Wrap(err, "MoveDir failed")
}
return nil
}
@ -625,7 +625,7 @@ func (o *Object) readEntry() (*dropbox.Entry, error) {
entry, err := o.fs.db.Metadata(o.remotePath(), false, false, "", "", metadataLimit)
if err != nil {
fs.Debug(o, "Error reading file: %s", err)
return nil, fmt.Errorf("Error reading file: %s", err)
return nil, errors.Wrap(err, "error reading file")
}
return entry, nil
}
@ -717,7 +717,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo) error {
}
entry, err := o.fs.db.UploadByChunk(ioutil.NopCloser(in), int(uploadChunkSize), remote, true, "")
if err != nil {
return fmt.Errorf("Upload failed: %s", err)
return errors.Wrap(err, "upload failed")
}
o.setMetadataFromEntry(entry)
return nil

View File

@ -1,8 +1,9 @@
package fs
import (
"fmt"
"io"
"github.com/pkg/errors"
)
// asyncReader will do async read-ahead from the input reader
@ -29,13 +30,13 @@ type asyncReader struct {
// When done use Close to release the buffers and close the supplied input.
func newAsyncReader(rd io.ReadCloser, buffers, size int) (io.ReadCloser, error) {
if size <= 0 {
return nil, fmt.Errorf("buffer size too small")
return nil, errors.New("buffer size too small")
}
if buffers <= 0 {
return nil, fmt.Errorf("number of buffers too small")
return nil, errors.New("number of buffers too small")
}
if rd == nil {
return nil, fmt.Errorf("nil reader supplied")
return nil, errors.New("nil reader supplied")
}
a := &asyncReader{}
a.init(rd, buffers, size)

View File

@ -26,6 +26,7 @@ import (
"github.com/Unknwon/goconfig"
"github.com/mreiferson/go-httpclient"
"github.com/pkg/errors"
"github.com/spf13/pflag"
"golang.org/x/crypto/nacl/secretbox"
"golang.org/x/text/unicode/norm"
@ -128,7 +129,7 @@ func (x SizeSuffix) String() string {
// Set a SizeSuffix
func (x *SizeSuffix) Set(s string) error {
if len(s) == 0 {
return fmt.Errorf("Empty string")
return errors.New("empty string")
}
if strings.ToLower(s) == "off" {
*x = -1
@ -150,7 +151,7 @@ func (x *SizeSuffix) Set(s string) error {
case 'g', 'G':
multiplier = 1 << 30
default:
return fmt.Errorf("Bad suffix %q", suffix)
return errors.Errorf("bad suffix %q", suffix)
}
s = s[:len(s)-suffixLen]
value, err := strconv.ParseFloat(s, 64)
@ -158,7 +159,7 @@ func (x *SizeSuffix) Set(s string) error {
return err
}
if value < 0 {
return fmt.Errorf("Size can't be negative %q", s)
return errors.Errorf("size can't be negative %q", s)
}
value *= multiplier
*x = SizeSuffix(value)
@ -402,7 +403,7 @@ func loadConfigFile() (*goconfig.ConfigFile, error) {
break
}
if strings.HasPrefix(l, "RCLONE_ENCRYPT_V") {
return nil, fmt.Errorf("Unsupported configuration encryption. Update rclone for support.")
return nil, errors.New("unsupported configuration encryption - update rclone for support")
}
return goconfig.LoadFromReader(bytes.NewBuffer(b))
}
@ -411,10 +412,10 @@ func loadConfigFile() (*goconfig.ConfigFile, error) {
dec := base64.NewDecoder(base64.StdEncoding, r)
box, err := ioutil.ReadAll(dec)
if err != nil {
return nil, fmt.Errorf("Failed to load base64 encoded data: %v", err)
return nil, errors.Wrap(err, "failed to load base64 encoded data")
}
if len(box) < 24+secretbox.Overhead {
return nil, fmt.Errorf("Configuration data too short")
return nil, errors.New("Configuration data too short")
}
envpw := os.Getenv("RCLONE_CONFIG_PASS")
@ -431,7 +432,7 @@ func loadConfigFile() (*goconfig.ConfigFile, error) {
}
if len(configKey) == 0 {
if !*AskPassword {
return nil, fmt.Errorf("Unable to decrypt configuration and not allowed to ask for password. Set RCLONE_CONFIG_PASS to your configuration password.")
return nil, errors.New("unable to decrypt configuration and not allowed to ask for password - set RCLONE_CONFIG_PASS to your configuration password")
}
getPassword("Enter configuration password:")
}
@ -479,7 +480,7 @@ func getPassword(q string) {
// zero after trimming+normalization, an error is returned.
func setPassword(password string) error {
if !utf8.ValidString(password) {
return fmt.Errorf("Password contains invalid utf8 characters")
return errors.New("password contains invalid utf8 characters")
}
// Remove leading+trailing whitespace
password = strings.TrimSpace(password)
@ -487,7 +488,7 @@ func setPassword(password string) error {
// Normalize to reduce weird variations.
password = norm.NFKC.String(password)
if len(password) == 0 {
return fmt.Errorf("No characters in password")
return errors.New("no characters in password")
}
// Create SHA256 has of the password
sha := sha256.New()

View File

@ -12,6 +12,7 @@ import (
"strings"
"time"
"github.com/pkg/errors"
"github.com/spf13/pflag"
)
@ -227,7 +228,7 @@ func NewFilter() (f *Filter, err error) {
}
f.ModTimeFrom = time.Now().Add(-duration)
if !f.ModTimeTo.IsZero() && f.ModTimeTo.Before(f.ModTimeFrom) {
return nil, fmt.Errorf("Argument --min-age can't be larger than --max-age")
return nil, errors.New("argument --min-age can't be larger than --max-age")
}
Debug(nil, "--max-age %v to %v", duration, f.ModTimeFrom)
}
@ -306,7 +307,7 @@ func (f *Filter) AddRule(rule string) error {
case strings.HasPrefix(rule, "+ "):
return f.Add(true, rule[2:])
}
return fmt.Errorf("Malformed rule %q", rule)
return errors.Errorf("malformed rule %q", rule)
}
// AddFile adds a single file to the files from list

View File

@ -11,6 +11,8 @@ import (
"regexp"
"sort"
"time"
"github.com/pkg/errors"
)
// Constants
@ -29,17 +31,17 @@ var (
// Filesystem registry
fsRegistry []*RegInfo
// ErrorNotFoundInConfigFile is returned by NewFs if not found in config file
ErrorNotFoundInConfigFile = fmt.Errorf("Didn't find section in config file")
ErrorCantPurge = fmt.Errorf("Can't purge directory")
ErrorCantCopy = fmt.Errorf("Can't copy object - incompatible remotes")
ErrorCantMove = fmt.Errorf("Can't move object - incompatible remotes")
ErrorCantDirMove = fmt.Errorf("Can't move directory - incompatible remotes")
ErrorDirExists = fmt.Errorf("Can't copy directory - destination already exists")
ErrorCantSetModTime = fmt.Errorf("Can't set modified time")
ErrorDirNotFound = fmt.Errorf("Directory not found")
ErrorLevelNotSupported = fmt.Errorf("Level value not supported")
ErrorListAborted = fmt.Errorf("List aborted")
ErrorListOnlyRoot = fmt.Errorf("Can only list from root")
ErrorNotFoundInConfigFile = errors.New("didn't find section in config file")
ErrorCantPurge = errors.New("can't purge directory")
ErrorCantCopy = errors.New("can't copy object - incompatible remotes")
ErrorCantMove = errors.New("can't move object - incompatible remotes")
ErrorCantDirMove = errors.New("can't move directory - incompatible remotes")
ErrorDirExists = errors.New("can't copy directory - destination already exists")
ErrorCantSetModTime = errors.New("can't set modified time")
ErrorDirNotFound = errors.New("directory not found")
ErrorLevelNotSupported = errors.New("level value not supported")
ErrorListAborted = errors.New("list aborted")
ErrorListOnlyRoot = errors.New("can only list from root")
)
// RegInfo provides information about a filesystem
@ -323,7 +325,7 @@ func Find(name string) (*RegInfo, error) {
return item, nil
}
}
return nil, fmt.Errorf("Didn't find filing system for %q", name)
return nil, errors.Errorf("didn't find filing system for %q", name)
}
// Pattern to match an rclone url

View File

@ -4,9 +4,10 @@ package fs
import (
"bytes"
"fmt"
"regexp"
"strings"
"github.com/pkg/errors"
)
// globToRegexp converts an rsync style glob to a regexp
@ -29,7 +30,7 @@ func globToRegexp(glob string) (*regexp.Regexp, error) {
case 2:
_, _ = re.WriteString(`.*`)
default:
return fmt.Errorf("too many stars in %q", glob)
return errors.Errorf("too many stars in %q", glob)
}
}
consecutiveStars = 0
@ -72,16 +73,16 @@ func globToRegexp(glob string) (*regexp.Regexp, error) {
_, _ = re.WriteRune(c)
inBrackets++
case ']':
return nil, fmt.Errorf("mismatched ']' in glob %q", glob)
return nil, errors.Errorf("mismatched ']' in glob %q", glob)
case '{':
if inBraces {
return nil, fmt.Errorf("can't nest '{' '}' in glob %q", glob)
return nil, errors.Errorf("can't nest '{' '}' in glob %q", glob)
}
inBraces = true
_, _ = re.WriteRune('(')
case '}':
if !inBraces {
return nil, fmt.Errorf("mismatched '{' and '}' in glob %q", glob)
return nil, errors.Errorf("mismatched '{' and '}' in glob %q", glob)
}
_, _ = re.WriteRune(')')
inBraces = false
@ -103,15 +104,15 @@ func globToRegexp(glob string) (*regexp.Regexp, error) {
return nil, err
}
if inBrackets > 0 {
return nil, fmt.Errorf("mismatched '[' and ']' in glob %q", glob)
return nil, errors.Errorf("mismatched '[' and ']' in glob %q", glob)
}
if inBraces {
return nil, fmt.Errorf("mismatched '{' and '}' in glob %q", glob)
return nil, errors.Errorf("mismatched '{' and '}' in glob %q", glob)
}
_, _ = re.WriteRune('$')
result, err := regexp.Compile(re.String())
if err != nil {
return nil, fmt.Errorf("Bad glob pattern %q: %v (%q)", glob, err, re.String())
return nil, errors.Wrapf(err, "bad glob pattern %q (regexp %q)", glob, re.String())
}
return result, nil
}

View File

@ -37,7 +37,7 @@ func TestGlobToRegexp(t *testing.T) {
{`ab}c`, `(^|/)`, `mismatched '{' and '}'`},
{`ab{c`, `(^|/)`, `mismatched '{' and '}'`},
{`*.{jpg,png,gif}`, `(^|/)[^/]*\.(jpg|png|gif)$`, ``},
{`[a--b]`, `(^|/)`, `Bad glob pattern`},
{`[a--b]`, `(^|/)`, `bad glob pattern`},
{`a\*b`, `(^|/)a\*b$`, ``},
{`a\\b`, `(^|/)a\\b$`, ``},
} {

View File

@ -8,6 +8,8 @@ import (
"hash"
"io"
"strings"
"github.com/pkg/errors"
)
// HashType indicates a standard hashing algorithm
@ -15,7 +17,7 @@ type HashType int
// ErrHashUnsupported should be returned by filesystem,
// if it is requested to deliver an unsupported hash type.
var ErrHashUnsupported = fmt.Errorf("hash type not supported")
var ErrHashUnsupported = errors.New("hash type not supported")
const (
// HashMD5 indicates MD5 support
@ -82,7 +84,7 @@ func (h HashType) String() string {
// and this function must support all types.
func hashFromTypes(set HashSet) (map[HashType]hash.Hash, error) {
if !set.SubsetOf(SupportedHashes) {
return nil, fmt.Errorf("Requested set %08x contains unknown hash types", int(set))
return nil, errors.Errorf("requested set %08x contains unknown hash types", int(set))
}
var hashers = make(map[HashType]hash.Hash)
types := set.Array()

View File

@ -4,6 +4,8 @@ import (
"fmt"
"io"
"time"
"github.com/pkg/errors"
)
// Limited defines a Fs which can only return the Objects passed in
@ -70,7 +72,7 @@ func (f *Limited) Put(in io.Reader, src ObjectInfo) (Object, error) {
remote := src.Remote()
obj := f.NewFsObject(remote)
if obj == nil {
return nil, fmt.Errorf("Can't create %q in limited fs", remote)
return nil, errors.Errorf("can't create %q in limited fs", remote)
}
return obj, obj.Update(in, src)
}

View File

@ -1,11 +1,11 @@
package fs
import (
"errors"
"io"
"testing"
"time"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@ -17,7 +17,7 @@ func TestListerNew(t *testing.T) {
assert.Equal(t, MaxLevel, o.level)
}
var errNotImpl = errors.New("Not implemented")
var errNotImpl = errors.New("not implemented")
type mockObject string

View File

@ -14,6 +14,8 @@ import (
"sync/atomic"
"time"
"github.com/pkg/errors"
"golang.org/x/text/unicode/norm"
)
@ -261,7 +263,7 @@ tryAgain:
// Verify sizes are the same after transfer
if src.Size() != dst.Size() {
Stats.Error()
err = fmt.Errorf("Corrupted on transfer: sizes differ %d vs %d", src.Size(), dst.Size())
err = errors.Errorf("corrupted on transfer: sizes differ %d vs %d", src.Size(), dst.Size())
ErrorLog(dst, "%s", err)
removeFailedCopy(dst)
return
@ -287,7 +289,7 @@ tryAgain:
ErrorLog(dst, "Failed to read hash: %s", err)
} else if !HashEquals(srcSum, dstSum) {
Stats.Error()
err = fmt.Errorf("Corrupted on transfer: %v hash differ %q vs %q", hashType, srcSum, dstSum)
err = errors.Errorf("corrupted on transfer: %v hash differ %q vs %q", hashType, srcSum, dstSum)
ErrorLog(dst, "%s", err)
removeFailedCopy(dst)
return
@ -796,7 +798,7 @@ func Check(fdst, fsrc Fs) error {
checkerWg.Wait()
Log(fdst, "%d differences found", Stats.GetErrors())
if differences > 0 {
return fmt.Errorf("%d differences found", differences)
return errors.Errorf("%d differences found", differences)
}
return nil
}

View File

@ -15,7 +15,6 @@ FIXME Patch/Delete/Get isn't working with files with spaces in - giving 404 erro
import (
"encoding/base64"
"encoding/hex"
"errors"
"fmt"
"io"
"io/ioutil"
@ -27,13 +26,13 @@ import (
"strings"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/oauthutil"
"github.com/pkg/errors"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"google.golang.org/api/googleapi"
"google.golang.org/api/storage/v1"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/oauthutil"
)
const (
@ -182,7 +181,7 @@ var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
func parsePath(path string) (bucket, directory string, err error) {
parts := matcher.FindStringSubmatch(path)
if parts == nil {
err = fmt.Errorf("Couldn't find bucket in storage path %q", path)
err = errors.Errorf("couldn't find bucket in storage path %q", path)
} else {
bucket, directory = parts[1], parts[2]
directory = strings.Trim(directory, "/")
@ -193,11 +192,11 @@ func parsePath(path string) (bucket, directory string, err error) {
func getServiceAccountClient(keyJsonfilePath string) (*http.Client, error) {
data, err := ioutil.ReadFile(os.ExpandEnv(keyJsonfilePath))
if err != nil {
return nil, fmt.Errorf("error opening credentials file: %v", err)
return nil, errors.Wrap(err, "error opening credentials file")
}
conf, err := google.JWTConfigFromJSON(data, storageConfig.Scopes...)
if err != nil {
return nil, fmt.Errorf("error processing credentials: %v", err)
return nil, errors.Wrap(err, "error processing credentials")
}
ctxWithSpecialClient := oauthutil.Context()
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
@ -245,7 +244,7 @@ func NewFs(name, root string) (fs.Fs, error) {
f.client = oAuthClient
f.svc, err = storage.New(f.client)
if err != nil {
return nil, fmt.Errorf("Couldn't create Google Cloud Storage client: %s", err)
return nil, errors.Wrap(err, "couldn't create Google Cloud Storage client")
}
if f.root != "" {
@ -357,7 +356,7 @@ func (f *Fs) list(dir string, level int, fn listFn) error {
func (f *Fs) listFiles(out fs.ListOpts, dir string) {
defer out.Finished()
if f.bucket == "" {
out.SetError(fmt.Errorf("Can't list objects at root - choose a bucket using lsd"))
out.SetError(errors.New("can't list objects at root - choose a bucket using lsd"))
return
}
// List the objects
@ -398,7 +397,7 @@ func (f *Fs) listBuckets(out fs.ListOpts, dir string) {
return
}
if f.projectNumber == "" {
out.SetError(errors.New("Can't list buckets without project number"))
out.SetError(errors.New("can't list buckets without project number"))
return
}
listBuckets := f.svc.Buckets.List(f.projectNumber).MaxResults(listChunks)
@ -458,7 +457,7 @@ func (f *Fs) Mkdir() error {
}
if f.projectNumber == "" {
return fmt.Errorf("Can't make bucket without project number")
return errors.New("can't make bucket without project number")
}
bucket := storage.Bucket{
@ -670,7 +669,7 @@ func (o *Object) Open() (in io.ReadCloser, err error) {
}
if res.StatusCode != 200 {
_ = res.Body.Close() // ignore error
return nil, fmt.Errorf("Bad response: %d: %s", res.StatusCode, res.Status)
return nil, errors.Errorf("bad response: %d: %s", res.StatusCode, res.Status)
}
return res.Body, nil
}

View File

@ -17,6 +17,7 @@ import (
"github.com/ncw/rclone/oauthutil"
"github.com/ncw/rclone/swift"
swiftLib "github.com/ncw/swift"
"github.com/pkg/errors"
"golang.org/x/oauth2"
)
@ -118,7 +119,7 @@ func (f *Fs) getCredentials() (err error) {
}
defer fs.CheckClose(resp.Body, &err)
if resp.StatusCode < 200 || resp.StatusCode > 299 {
return fmt.Errorf("Failed to get credentials: %s", resp.Status)
return errors.Errorf("failed to get credentials: %s", resp.Status)
}
decoder := json.NewDecoder(resp.Body)
var result credentials
@ -128,7 +129,7 @@ func (f *Fs) getCredentials() (err error) {
}
// fs.Debug(f, "Got credentials %+v", result)
if result.Token == "" || result.Endpoint == "" || result.Expires == "" {
return fmt.Errorf("Couldn't read token, result and expired from credentials")
return errors.New("couldn't read token, result and expired from credentials")
}
f.credentials = result
expires, err := time.Parse(time.RFC3339, result.Expires)
@ -144,7 +145,7 @@ func (f *Fs) getCredentials() (err error) {
func NewFs(name, root string) (fs.Fs, error) {
client, _, err := oauthutil.NewClient(name, oauthConfig)
if err != nil {
return nil, fmt.Errorf("Failed to configure Hubic: %v", err)
return nil, errors.Wrap(err, "failed to configure Hubic")
}
f := &Fs{
@ -161,7 +162,7 @@ func NewFs(name, root string) (fs.Fs, error) {
}
err = c.Authenticate()
if err != nil {
return nil, fmt.Errorf("Error authenticating swift connection: %v", err)
return nil, errors.Wrap(err, "error authenticating swift connection")
}
// Make inner swift Fs from the connection

View File

@ -80,7 +80,7 @@ func NewFs(name, root string) (fs.Fs, error) {
f.root, remote = getDirFile(f.root)
obj := f.NewFsObject(remote)
if obj == nil {
return nil, fmt.Errorf("Failed to make object for %q in %q", remote, f.root)
return nil, errors.Errorf("failed to make object for %q in %q", remote, f.root)
}
// return a Fs Limited to this object
return fs.NewLimited(f, obj), nil
@ -368,7 +368,7 @@ func (f *Fs) Purge() error {
return err
}
if !fi.Mode().IsDir() {
return fmt.Errorf("Can't Purge non directory: %q", f.root)
return errors.Errorf("can't purge non directory: %q", f.root)
}
return os.RemoveAll(f.root)
}
@ -400,7 +400,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
return nil, err
} else if !dstObj.info.Mode().IsRegular() {
// It isn't a file
return nil, fmt.Errorf("Can't move file onto non-file")
return nil, errors.New("can't move file onto non-file")
}
// Create destination
@ -490,7 +490,7 @@ func (o *Object) Hash(r fs.HashType) (string, error) {
oldsize := o.info.Size()
err := o.lstat()
if err != nil {
return "", errors.Wrap(err, "Hash failed to stat")
return "", errors.Wrap(err, "hash: failed to stat")
}
if !o.info.ModTime().Equal(oldtime) || oldsize != o.info.Size() {
@ -501,15 +501,15 @@ func (o *Object) Hash(r fs.HashType) (string, error) {
o.hashes = make(map[fs.HashType]string)
in, err := os.Open(o.path)
if err != nil {
return "", errors.Wrap(err, "Hash failed to open")
return "", errors.Wrap(err, "hash: failed to open")
}
o.hashes, err = fs.HashStream(in)
closeErr := in.Close()
if err != nil {
return "", errors.Wrap(err, "Hash failed to read")
return "", errors.Wrap(err, "hash: failed to read")
}
if closeErr != nil {
return "", errors.Wrap(closeErr, "Hash failed to close")
return "", errors.Wrap(closeErr, "hash: failed to close")
}
}
return o.hashes[r], nil

View File

@ -12,6 +12,7 @@ import (
"time"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
"github.com/skratchdot/open-golang/open"
"golang.org/x/net/context"
"golang.org/x/oauth2"
@ -58,7 +59,7 @@ func getToken(name string) (*oauth2.Token, error) {
return nil, err
}
if tokenString == "" {
return nil, fmt.Errorf("Empty token found - please run rclone config again")
return nil, errors.New("empty token found - please run rclone config again")
}
token := new(oauth2.Token)
err = json.Unmarshal([]byte(tokenString), token)
@ -301,7 +302,7 @@ func Config(id, name string, config *oauth2.Config) error {
if authCode != "" {
fmt.Printf("Got code\n")
} else {
return fmt.Errorf("Failed to get code")
return errors.New("failed to get code")
}
} else {
// Read the code, and exchange it for a token.
@ -310,14 +311,14 @@ func Config(id, name string, config *oauth2.Config) error {
}
token, err := config.Exchange(oauth2.NoContext, authCode)
if err != nil {
return fmt.Errorf("Failed to get token: %v", err)
return errors.Wrap(err, "failed to get token")
}
// Print code if we do automatic retrieval
if automatic {
result, err := json.Marshal(token)
if err != nil {
return fmt.Errorf("Failed to marshal token: %v", err)
return errors.Wrap(err, "failed to marshal token")
}
fmt.Printf("Paste the following into your remote machine --->\n%s\n<---End paste", result)
}

View File

@ -187,7 +187,7 @@ func NewFs(name, root string) (fs.Fs, error) {
// Get rootID
rootInfo, _, err := f.readMetaDataForPath("")
if err != nil || rootInfo.ID == "" {
return nil, fmt.Errorf("Failed to get root: %v", err)
return nil, errors.Wrap(err, "failed to get root")
}
f.dirCache = dircache.New(root, rootInfo.ID, f)
@ -258,7 +258,7 @@ func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err er
// fs.Debug(f, "FindLeaf(%q, %q)", pathID, leaf)
parent, ok := f.dirCache.GetInv(pathID)
if !ok {
return "", false, fmt.Errorf("Couldn't find parent ID")
return "", false, errors.New("couldn't find parent ID")
}
path := leaf
if parent != "" {
@ -275,7 +275,7 @@ func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err er
return "", false, err
}
if info.Folder == nil {
return "", false, fmt.Errorf("Found file when looking for folder")
return "", false, errors.New("found file when looking for folder")
}
return info.ID, true, nil
}
@ -467,7 +467,7 @@ func (f *Fs) deleteObject(id string) error {
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(check bool) error {
if f.root == "" {
return fmt.Errorf("Can't purge root directory")
return errors.New("can't purge root directory")
}
dc := f.dirCache
err := dc.FindRoot(false)
@ -480,10 +480,10 @@ func (f *Fs) purgeCheck(check bool) error {
return err
}
if item.Folder == nil {
return fmt.Errorf("Not a folder")
return errors.New("not a folder")
}
if check && item.Folder.ChildCount != 0 {
return fmt.Errorf("Folder not empty")
return errors.New("folder not empty")
}
err = f.deleteObject(rootID)
if err != nil {
@ -533,7 +533,7 @@ func (f *Fs) waitForJob(location string, o *Object) error {
return err
}
if status.Status == "failed" || status.Status == "deleteFailed" {
return fmt.Errorf("Async operation %q returned %q", status.Operation, status.Status)
return errors.Errorf("async operation %q returned %q", status.Operation, status.Status)
}
} else {
var info api.Item
@ -546,7 +546,7 @@ func (f *Fs) waitForJob(location string, o *Object) error {
}
time.Sleep(1 * time.Second)
}
return fmt.Errorf("Async operation didn't complete after %v", fs.Config.Timeout)
return errors.Errorf("async operation didn't complete after %v", fs.Config.Timeout)
}
// Copy src to this remote using server side copy operations.
@ -601,7 +601,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
// read location header
location := resp.Header.Get("Location")
if location == "" {
return nil, fmt.Errorf("Didn't receive location header in copy response")
return nil, errors.New("didn't receive location header in copy response")
}
// Wait for job to finish
@ -764,7 +764,7 @@ func (o *Object) Storable() bool {
// Open an object for read
func (o *Object) Open() (in io.ReadCloser, err error) {
if o.id == "" {
return nil, fmt.Errorf("Can't download no id")
return nil, errors.New("can't download - no id")
}
var resp *http.Response
opts := rest.Opts{
@ -834,7 +834,7 @@ func (o *Object) cancelUploadSession(url string) (err error) {
// uploadMultipart uploads a file using multipart upload
func (o *Object) uploadMultipart(in io.Reader, size int64) (err error) {
if chunkSize%(320*1024) != 0 {
return fmt.Errorf("Chunk size %d is not a multiple of 320k", chunkSize)
return errors.Errorf("chunk size %d is not a multiple of 320k", chunkSize)
}
// Create upload session

View File

@ -6,6 +6,7 @@ import (
"time"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
)
func TestNew(t *testing.T) {
@ -310,7 +311,7 @@ func TestEndCallZeroConnections(t *testing.T) {
}
}
var errFoo = fmt.Errorf("Foo")
var errFoo = errors.New("foo")
type dummyPaced struct {
retry bool

View File

@ -6,13 +6,13 @@ package rest
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"sync"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
)
// Client contains the info to sustain the API
@ -43,7 +43,7 @@ func defaultErrorHandler(resp *http.Response) (err error) {
if err != nil {
return err
}
return fmt.Errorf("HTTP error %v (%v) returned body: %q", resp.StatusCode, resp.Status, body)
return errors.Errorf("HTTP error %v (%v) returned body: %q", resp.StatusCode, resp.Status, body)
}
// SetErrorHandler sets the handler to decode an error response when
@ -102,14 +102,14 @@ func (api *Client) Call(opts *Opts) (resp *http.Response, err error) {
api.mu.RLock()
defer api.mu.RUnlock()
if opts == nil {
return nil, fmt.Errorf("call() called with nil opts")
return nil, errors.New("call() called with nil opts")
}
var url string
if opts.Absolute {
url = opts.Path
} else {
if api.rootURL == "" {
return nil, fmt.Errorf("RootURL not set")
return nil, errors.New("RootURL not set")
}
url = api.rootURL + opts.Path
}

View File

@ -14,7 +14,6 @@ What happens if you CTRL-C a multipart upload
*/
import (
"errors"
"fmt"
"io"
"net/http"
@ -36,6 +35,7 @@ import (
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/ncw/rclone/fs"
"github.com/ncw/swift"
"github.com/pkg/errors"
)
// Register with Fs
@ -200,7 +200,7 @@ var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
func s3ParsePath(path string) (bucket, directory string, err error) {
parts := matcher.FindStringSubmatch(path)
if parts == nil {
err = fmt.Errorf("Couldn't parse bucket out of s3 path %q", path)
err = errors.Errorf("couldn't parse bucket out of s3 path %q", path)
} else {
bucket, directory = parts[1], parts[2]
directory = strings.Trim(directory, "/")
@ -452,7 +452,7 @@ func (f *Fs) listFiles(out fs.ListOpts, dir string) {
defer out.Finished()
if f.bucket == "" {
// Return no objects at top level list
out.SetError(errors.New("Can't list objects at root - choose a bucket using lsd"))
out.SetError(errors.New("can't list objects at root - choose a bucket using lsd"))
return
}
// List the objects and directories

View File

@ -3,7 +3,6 @@ package swift
import (
"bytes"
"errors"
"fmt"
"io"
"path"
@ -14,6 +13,7 @@ import (
"github.com/ncw/rclone/fs"
"github.com/ncw/swift"
"github.com/pkg/errors"
"github.com/spf13/pflag"
)
@ -132,7 +132,7 @@ var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
func parsePath(path string) (container, directory string, err error) {
parts := matcher.FindStringSubmatch(path)
if parts == nil {
err = fmt.Errorf("Couldn't find container in swift path %q", path)
err = errors.Errorf("couldn't find container in swift path %q", path)
} else {
container, directory = parts[1], parts[2]
directory = strings.Trim(directory, "/")
@ -318,7 +318,7 @@ func (f *Fs) list(dir string, level int, fn listFn) error {
func (f *Fs) listFiles(out fs.ListOpts, dir string) {
defer out.Finished()
if f.container == "" {
out.SetError(errors.New("Can't list objects at root - choose a container using lsd"))
out.SetError(errors.New("can't list objects at root - choose a container using lsd"))
return
}
// List the objects

View File

@ -2,13 +2,14 @@ package src
import (
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strings"
"github.com/pkg/errors"
)
//Client struct

View File

@ -1,9 +1,10 @@
package src
import (
"fmt"
"io/ioutil"
"net/http"
"github.com/pkg/errors"
)
// PerformDelete does the actual delete via DELETE request.
@ -28,7 +29,7 @@ func (c *Client) PerformDelete(url string) error {
if err != nil {
return err
}
return fmt.Errorf("delete error [%d]: %s", resp.StatusCode, string(body[:]))
return errors.Errorf("delete error [%d]: %s", resp.StatusCode, string(body[:]))
}
return nil
}

View File

@ -1,10 +1,11 @@
package src
import (
"fmt"
"io"
"io/ioutil"
"net/http"
"github.com/pkg/errors"
)
// PerformDownload does the actual download via unscoped PUT request.
@ -27,7 +28,7 @@ func (c *Client) PerformDownload(url string) (out io.ReadCloser, err error) {
if err != nil {
return nil, err
}
return nil, fmt.Errorf("download error [%d]: %s", resp.StatusCode, string(body[:]))
return nil, errors.Errorf("download error [%d]: %s", resp.StatusCode, string(body[:]))
}
return resp.Body, err
}

View File

@ -1,9 +1,10 @@
package src
import (
"fmt"
"io/ioutil"
"net/http"
"github.com/pkg/errors"
)
// PerformMkdir does the actual mkdir via PUT request.
@ -27,7 +28,7 @@ func (c *Client) PerformMkdir(url string) (int, string, error) {
return 0, "", err
}
//third parameter is the json error response body
return resp.StatusCode, string(body[:]), fmt.Errorf("Create Folder error [%d]: %s", resp.StatusCode, string(body[:]))
return resp.StatusCode, string(body[:]), errors.Errorf("create folder error [%d]: %s", resp.StatusCode, string(body[:]))
}
return resp.StatusCode, "", nil
}

View File

@ -3,10 +3,11 @@ package src
//from yadisk
import (
"fmt"
"io"
"io/ioutil"
"net/http"
"github.com/pkg/errors"
)
// PerformUpload does the actual upload via unscoped PUT request.
@ -30,7 +31,7 @@ func (c *Client) PerformUpload(url string, data io.Reader) (err error) {
return err
}
return fmt.Errorf("upload error [%d]: %s", resp.StatusCode, string(body[:]))
return errors.Errorf("upload error [%d]: %s", resp.StatusCode, string(body[:]))
}
return nil
}

View File

@ -407,10 +407,10 @@ func (f *Fs) purgeCheck(check bool) error {
var opt yandex.ResourceInfoRequestOptions
ResourceInfoResponse, err := f.yd.NewResourceInfoRequest(f.diskRoot, opt).Exec()
if err != nil {
return fmt.Errorf("Rmdir failed: %s", err)
return errors.Wrap(err, "rmdir failed")
}
if len(ResourceInfoResponse.Embedded.Items) != 0 {
return fmt.Errorf("Rmdir failed: Directory not empty")
return errors.New("rmdir failed: directory not empty")
}
}
//delete directory