fs: deglobalise the config #4685

This is done by making fs.Config private and attaching it to the
context instead.

The Config should be obtained with fs.GetConfig and fs.AddConfig
should be used to get a new mutable config that can be changed.
Nick Craig-Wood 2020-11-05 11:33:32 +00:00
parent 506342317b
commit 2e21c58e6a
93 changed files with 1128 additions and 847 deletions
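
In practice the two entry points named above are used like this. The following is a minimal sketch of the post-commit API (fs.GetConfig to read, fs.AddConfig to override), not code taken from the diff itself:

package example

import (
	"context"

	"github.com/rclone/rclone/fs"
)

// getAndOverrideConfig demonstrates reading the config attached to a
// context and making a private, mutable copy of it.
func getAndOverrideConfig(ctx context.Context) context.Context {
	// Read path: fs.GetConfig returns the *fs.ConfigInfo attached to
	// ctx, or the global defaults if none has been attached.
	ci := fs.GetConfig(ctx)
	fs.Debugf(nil, "transfers = %d", ci.Transfers)

	// Write path: fs.AddConfig returns a new context carrying a copy
	// of the config plus a pointer to that copy, so the copy can be
	// mutated without affecting the parent context's config.
	newCtx, newCi := fs.AddConfig(ctx)
	newCi.LowLevelRetries = 1
	return newCtx
}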

View File

@@ -144,6 +144,7 @@ type Fs struct {
 	name         string             // name of this remote
 	features     *fs.Features       // optional features
 	opt          Options            // options for this Fs
+	ci           *fs.ConfigInfo     // global config
 	c            *acd.Client        // the connection to the acd server
 	noAuthClient *http.Client       // unauthenticated http client
 	root         string             // the path we are working on
@@ -247,7 +248,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		return nil, err
 	}
 	root = parsePath(root)
-	baseClient := fshttp.NewClient(fs.Config)
+	baseClient := fshttp.NewClient(fs.GetConfig(ctx))
 	if do, ok := baseClient.Transport.(interface {
 		SetRequestFilter(f func(req *http.Request))
 	}); ok {
@@ -261,13 +262,15 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	}
 	c := acd.NewClient(oAuthClient)
+	ci := fs.GetConfig(ctx)
 	f := &Fs{
 		name:         name,
 		root:         root,
 		opt:          *opt,
+		ci:           ci,
 		c:            c,
-		pacer:        fs.NewPacer(pacer.NewAmazonCloudDrive(pacer.MinSleep(minSleep))),
-		noAuthClient: fshttp.NewClient(fs.Config),
+		pacer:        fs.NewPacer(ctx, pacer.NewAmazonCloudDrive(pacer.MinSleep(minSleep))),
+		noAuthClient: fshttp.NewClient(ci),
 	}
 	f.features = (&fs.Features{
 		CaseInsensitive: true,
@@ -501,7 +504,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	if err != nil {
 		return nil, err
 	}
-	maxTries := fs.Config.LowLevelRetries
+	maxTries := f.ci.LowLevelRetries
 	var iErr error
 	for tries := 1; tries <= maxTries; tries++ {
 		entries = nil
@@ -716,7 +719,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		dstObj         fs.Object
 		srcErr, dstErr error
 	)
-	for i := 1; i <= fs.Config.LowLevelRetries; i++ {
+	for i := 1; i <= f.ci.LowLevelRetries; i++ {
 		_, srcErr = srcObj.fs.NewObject(ctx, srcObj.remote) // try reading the object
 		if srcErr != nil && srcErr != fs.ErrorObjectNotFound {
 			// exit if error on source
@@ -731,7 +734,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 			// finished if src not found and dst found
 			break
 		}
-		fs.Debugf(src, "Wait for directory listing to update after move %d/%d", i, fs.Config.LowLevelRetries)
+		fs.Debugf(src, "Wait for directory listing to update after move %d/%d", i, f.ci.LowLevelRetries)
 		time.Sleep(1 * time.Second)
 	}
 	return dstObj, dstErr

View File

@@ -187,6 +187,7 @@ type Fs struct {
 	name        string             // name of this remote
 	root        string             // the path we are working on if any
 	opt         Options            // parsed config options
+	ci          *fs.ConfigInfo     // global config
 	features    *fs.Features       // optional features
 	client      *http.Client       // http client we are using
 	svcURL      *azblob.ServiceURL // reference to serviceURL
@@ -409,18 +410,20 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 			string(azblob.AccessTierHot), string(azblob.AccessTierCool), string(azblob.AccessTierArchive))
 	}
+	ci := fs.GetConfig(ctx)
 	f := &Fs{
 		name:        name,
 		opt:         *opt,
-		pacer:       fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
-		uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
-		client:      fshttp.NewClient(fs.Config),
+		ci:          ci,
+		pacer:       fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		uploadToken: pacer.NewTokenDispenser(ci.Transfers),
+		client:      fshttp.NewClient(fs.GetConfig(ctx)),
 		cache:       bucket.NewCache(),
 		cntURLcache: make(map[string]*azblob.ContainerURL, 1),
 		pool: pool.New(
 			time.Duration(opt.MemoryPoolFlushTime),
 			int(opt.ChunkSize),
-			fs.Config.Transfers,
+			ci.Transfers,
 			opt.MemoryPoolUseMmap,
 		),
 	}
@@ -1035,7 +1038,7 @@ func (f *Fs) getMemoryPool(size int64) *pool.Pool {
 	return pool.New(
 		time.Duration(f.opt.MemoryPoolFlushTime),
 		int(size),
-		fs.Config.Transfers,
+		f.ci.Transfers,
 		f.opt.MemoryPoolUseMmap,
 	)
 }

View File

@@ -214,6 +214,7 @@ type Fs struct {
 	name        string       // name of this remote
 	root        string       // the path we are working on if any
 	opt         Options      // parsed config options
+	ci          *fs.ConfigInfo // global config
 	features    *fs.Features // optional features
 	srv         *rest.Client // the connection to the b2 server
 	rootBucket  string       // bucket part of root (if any)
@@ -415,20 +416,22 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	if opt.Endpoint == "" {
 		opt.Endpoint = defaultEndpoint
 	}
+	ci := fs.GetConfig(ctx)
 	f := &Fs{
 		name:        name,
 		opt:         *opt,
-		srv:         rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
+		ci:          ci,
+		srv:         rest.NewClient(fshttp.NewClient(fs.GetConfig(ctx))).SetErrorHandler(errorHandler),
 		cache:       bucket.NewCache(),
 		_bucketID:   make(map[string]string, 1),
 		_bucketType: make(map[string]string, 1),
 		uploads:     make(map[string][]*api.GetUploadURLResponse),
-		pacer:       fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
-		uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
+		pacer:       fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		uploadToken: pacer.NewTokenDispenser(ci.Transfers),
 		pool: pool.New(
 			time.Duration(opt.MemoryPoolFlushTime),
 			int(opt.ChunkSize),
-			fs.Config.Transfers,
+			ci.Transfers,
 			opt.MemoryPoolUseMmap,
 		),
 	}
@@ -1167,10 +1170,10 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
 	}
 	// Delete Config.Transfers in parallel
-	toBeDeleted := make(chan *api.File, fs.Config.Transfers)
+	toBeDeleted := make(chan *api.File, f.ci.Transfers)
 	var wg sync.WaitGroup
-	wg.Add(fs.Config.Transfers)
-	for i := 0; i < fs.Config.Transfers; i++ {
+	wg.Add(f.ci.Transfers)
+	for i := 0; i < f.ci.Transfers; i++ {
 		go func() {
 			defer wg.Done()
 			for object := range toBeDeleted {

View File

@@ -91,7 +91,7 @@ func init() {
 			var err error
 			// If using box config.json, use JWT auth
 			if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
-				err = refreshJWTToken(jsonFile, boxSubType, name, m)
+				err = refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
 				if err != nil {
 					log.Fatalf("Failed to configure token with jwt authentication: %v", err)
 				}
@@ -153,7 +153,7 @@ func init() {
 	})
 }
-func refreshJWTToken(jsonFile string, boxSubType string, name string, m configmap.Mapper) error {
+func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, name string, m configmap.Mapper) error {
 	jsonFile = env.ShellExpand(jsonFile)
 	boxConfig, err := getBoxConfig(jsonFile)
 	if err != nil {
@@ -169,7 +169,7 @@ func refreshJWTToken(jsonFile string, boxSubType string, name string, m configma
 	}
 	signingHeaders := getSigningHeaders(boxConfig)
 	queryParams := getQueryParams(boxConfig)
-	client := fshttp.NewClient(fs.Config)
+	client := fshttp.NewClient(fs.GetConfig(ctx))
 	err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
 	return err
 }
@@ -386,7 +386,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	root = parsePath(root)
-	client := fshttp.NewClient(fs.Config)
+	client := fshttp.NewClient(fs.GetConfig(ctx))
 	var ts *oauthutil.TokenSource
 	// If not using an accessToken, create an oauth client and tokensource
 	if opt.AccessToken == "" {
@@ -396,13 +396,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		}
 	}
+	ci := fs.GetConfig(ctx)
 	f := &Fs{
 		name:        name,
 		root:        root,
 		opt:         *opt,
 		srv:         rest.NewClient(client).SetRoot(rootURL),
-		pacer:       fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
-		uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
+		pacer:       fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		uploadToken: pacer.NewTokenDispenser(ci.Transfers),
 	}
 	f.features = (&fs.Features{
 		CaseInsensitive: true,
@@ -423,7 +424,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	// should do so whether there are uploads pending or not.
 	if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
 		f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
-			err := refreshJWTToken(jsonFile, boxSubType, name, m)
+			err := refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
 			return err
 		})
 		f.tokenRenewer.Start()

View File

@@ -925,7 +925,8 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 	boltDb, err := cache.GetPersistent(runInstance.dbPath, runInstance.chunkPath, &cache.Features{PurgeDb: true})
 	require.NoError(t, err)
-	fs.Config.LowLevelRetries = 1
+	ci := fs.GetConfig(context.Background())
+	ci.LowLevelRetries = 1
 	// Instantiate root
 	if purge {

View File

@@ -564,6 +564,7 @@ type Fs struct {
 	name     string            // name of this remote
 	root     string            // the path we are working on
 	opt      Options           // parsed options
+	ci       *fs.ConfigInfo    // global config
 	features *fs.Features      // optional features
 	svc      *drive.Service    // the connection to the drive server
 	v2Svc    *drive_v2.Service // used to create download links for the v2 api
@@ -940,8 +941,10 @@ func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, er
 // Figure out if the user wants to use a team drive
 func configTeamDrive(ctx context.Context, opt *Options, m configmap.Mapper, name string) error {
+	ci := fs.GetConfig(ctx)
 	// Stop if we are running non-interactive config
-	if fs.Config.AutoConfirm {
+	if ci.AutoConfirm {
 		return nil
 	}
 	if opt.TeamDriveID == "" {
@@ -979,8 +982,8 @@ func configTeamDrive(ctx context.Context, opt *Options, m configmap.Mapper, name
 }
 // getClient makes an http client according to the options
-func getClient(opt *Options) *http.Client {
-	t := fshttp.NewTransportCustom(fs.Config, func(t *http.Transport) {
+func getClient(ctx context.Context, opt *Options) *http.Client {
+	t := fshttp.NewTransportCustom(fs.GetConfig(ctx), func(t *http.Transport) {
 		if opt.DisableHTTP2 {
 			t.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{}
 		}
@@ -999,7 +1002,7 @@ func getServiceAccountClient(ctx context.Context, opt *Options, credentialsData
 	if opt.Impersonate != "" {
 		conf.Subject = opt.Impersonate
 	}
-	ctxWithSpecialClient := oauthutil.Context(ctx, getClient(opt))
+	ctxWithSpecialClient := oauthutil.Context(ctx, getClient(ctx, opt))
 	return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
 }
@@ -1021,7 +1024,7 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm
 			return nil, errors.Wrap(err, "failed to create oauth client from service account")
 		}
 	} else {
-		oAuthClient, _, err = oauthutil.NewClientWithBaseClient(ctx, name, m, driveConfig, getClient(opt))
+		oAuthClient, _, err = oauthutil.NewClientWithBaseClient(ctx, name, m, driveConfig, getClient(ctx, opt))
 		if err != nil {
 			return nil, errors.Wrap(err, "failed to create oauth client")
 		}
@@ -1090,11 +1093,13 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
 		return nil, err
 	}
+	ci := fs.GetConfig(ctx)
 	f := &Fs{
 		name:     name,
 		root:     root,
 		opt:      *opt,
-		pacer:    fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst))),
+		ci:       ci,
+		pacer:    fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst))),
 		m:        m,
 		grouping: listRGrouping,
 		listRmu:  new(sync.Mutex),
@@ -1803,7 +1808,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 	mu := sync.Mutex{} // protects in and overflow
 	wg := sync.WaitGroup{}
 	in := make(chan listREntry, listRInputBuffer)
-	out := make(chan error, fs.Config.Checkers)
+	out := make(chan error, f.ci.Checkers)
 	list := walk.NewListRHelper(callback)
 	overflow := []listREntry{}
 	listed := 0
@@ -1842,7 +1847,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 	wg.Add(1)
 	in <- listREntry{directoryID, dir}
-	for i := 0; i < fs.Config.Checkers; i++ {
+	for i := 0; i < f.ci.Checkers; i++ {
 		go f.listRRunner(ctx, &wg, in, out, cb, sendJob)
 	}
 	go func() {
@@ -1875,7 +1880,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 		mu.Unlock()
 	}()
 	// wait until the all workers to finish
-	for i := 0; i < fs.Config.Checkers; i++ {
+	for i := 0; i < f.ci.Checkers; i++ {
 		e := <-out
 		mu.Lock()
 		// if one worker returns an error early, close the input so all other workers exit

View File

@@ -324,7 +324,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	f := &Fs{
 		name:  name,
 		opt:   *opt,
-		pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 	}
 	config := dropbox.Config{
 		LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo

View File

@@ -186,7 +186,7 @@ func NewFs(ctx context.Context, name string, root string, config configmap.Mappe
 		name:       name,
 		root:       root,
 		opt:        *opt,
-		pacer:      fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant), pacer.AttackConstant(attackConstant))),
+		pacer:      fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant), pacer.AttackConstant(attackConstant))),
 		baseClient: &http.Client{},
 	}
@@ -195,7 +195,7 @@ func NewFs(ctx context.Context, name string, root string, config configmap.Mappe
 		CanHaveEmptyDirectories: true,
 	}).Fill(ctx, f)
-	client := fshttp.NewClient(fs.Config)
+	client := fshttp.NewClient(fs.GetConfig(ctx))
 	f.rest = rest.NewClient(client).SetRoot(apiBaseURL)

View File

@@ -4,13 +4,11 @@ package fichier
 import (
 	"testing"
-	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fstest/fstests"
 )
 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
-	fs.Config.LogLevel = fs.LogLevelDebug
 	fstests.Run(t, &fstests.Opt{
 		RemoteName: "TestFichier:",
 	})

View File

@@ -425,7 +425,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	root = parsePath(root)
-	client := fshttp.NewClient(fs.Config)
+	client := fshttp.NewClient(fs.GetConfig(ctx))
 	f := &Fs{
 		name:  name,
@@ -433,7 +433,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		opt:   *opt,
 		m:     m,
 		srv:   rest.NewClient(client).SetRoot(opt.URL),
-		pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 		token: opt.Token,
 	}
 	f.features = (&fs.Features{

View File

@@ -122,10 +122,11 @@ type Options struct {
 // Fs represents a remote FTP server
 type Fs struct {
 	name     string         // name of this remote
 	root     string         // the path we are working on if any
 	opt      Options        // parsed options
-	features *fs.Features // optional features
+	ci       *fs.ConfigInfo // global config
+	features *fs.Features   // optional features
 	url      string
 	user     string
 	pass     string
@@ -210,9 +211,9 @@ func (dl *debugLog) Write(p []byte) (n int, err error) {
 }
 // Open a new connection to the FTP server.
-func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
+func (f *Fs) ftpConnection(ctx context.Context) (*ftp.ServerConn, error) {
 	fs.Debugf(f, "Connecting to FTP server")
-	ftpConfig := []ftp.DialOption{ftp.DialWithTimeout(fs.Config.ConnectTimeout)}
+	ftpConfig := []ftp.DialOption{ftp.DialWithTimeout(f.ci.ConnectTimeout)}
 	if f.opt.TLS && f.opt.ExplicitTLS {
 		fs.Errorf(f, "Implicit TLS and explicit TLS are mutually incompatible. Please revise your config")
 		return nil, errors.New("Implicit TLS and explicit TLS are mutually incompatible. Please revise your config")
@@ -235,8 +236,8 @@ func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
 	if f.opt.DisableMLSD {
 		ftpConfig = append(ftpConfig, ftp.DialWithDisabledMLSD(true))
 	}
-	if fs.Config.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
-		ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: fs.Config.Dump&fs.DumpAuth != 0}))
+	if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
+		ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0}))
 	}
 	c, err := ftp.Dial(f.dialAddr, ftpConfig...)
 	if err != nil {
@@ -253,7 +254,7 @@ func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
 }
 // Get an FTP connection from the pool, or open a new one
-func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
+func (f *Fs) getFtpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
 	if f.opt.Concurrency > 0 {
 		f.tokens.Get()
 	}
@@ -266,7 +267,7 @@ func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
 	if c != nil {
 		return c, nil
 	}
-	c, err = f.ftpConnection()
+	c, err = f.ftpConnection(ctx)
 	if err != nil && f.opt.Concurrency > 0 {
 		f.tokens.Put()
 	}
@@ -336,10 +337,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
 		protocol = "ftps://"
 	}
 	u := protocol + path.Join(dialAddr+"/", root)
+	ci := fs.GetConfig(ctx)
 	f := &Fs{
 		name: name,
 		root: root,
 		opt:  *opt,
+		ci:   ci,
 		url:  u,
 		user: user,
 		pass: pass,
@@ -350,7 +353,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
 		CanHaveEmptyDirectories: true,
 	}).Fill(ctx, f)
 	// Make a connection and pool it to return errors early
-	c, err := f.getFtpConnection()
+	c, err := f.getFtpConnection(ctx)
 	if err != nil {
 		return nil, errors.Wrap(err, "NewFs")
 	}
@@ -421,7 +424,7 @@ func (f *Fs) dirFromStandardPath(dir string) string {
 }
 // findItem finds a directory entry for the name in its parent directory
-func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
+func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err error) {
 	// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
 	fullPath := path.Join(f.root, remote)
 	if fullPath == "" || fullPath == "." || fullPath == "/" {
@@ -435,7 +438,7 @@ func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
 	dir := path.Dir(fullPath)
 	base := path.Base(fullPath)
-	c, err := f.getFtpConnection()
+	c, err := f.getFtpConnection(ctx)
 	if err != nil {
 		return nil, errors.Wrap(err, "findItem")
 	}
@@ -457,7 +460,7 @@ func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
 // it returns the error fs.ErrorObjectNotFound.
 func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
 	// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
-	entry, err := f.findItem(remote)
+	entry, err := f.findItem(ctx, remote)
 	if err != nil {
 		return nil, err
 	}
@@ -479,8 +482,8 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err err
 }
 // dirExists checks the directory pointed to by remote exists or not
-func (f *Fs) dirExists(remote string) (exists bool, err error) {
-	entry, err := f.findItem(remote)
+func (f *Fs) dirExists(ctx context.Context, remote string) (exists bool, err error) {
+	entry, err := f.findItem(ctx, remote)
 	if err != nil {
 		return false, errors.Wrap(err, "dirExists")
 	}
@@ -501,7 +504,7 @@ func (f *Fs) dirExists(remote string) (exists bool, err error) {
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 	// defer log.Trace(dir, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
-	c, err := f.getFtpConnection()
+	c, err := f.getFtpConnection(ctx)
 	if err != nil {
 		return nil, errors.Wrap(err, "list")
 	}
@@ -522,7 +525,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	}()
 	// Wait for List for up to Timeout seconds
-	timer := time.NewTimer(fs.Config.Timeout)
+	timer := time.NewTimer(f.ci.Timeout)
 	select {
 	case listErr = <-errchan:
 		timer.Stop()
@@ -539,7 +542,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	// doesn't exist, so check it really doesn't exist if no
 	// entries found.
 	if len(files) == 0 {
-		exists, err := f.dirExists(dir)
+		exists, err := f.dirExists(ctx, dir)
 		if err != nil {
 			return nil, errors.Wrap(err, "list")
 		}
@@ -592,7 +595,7 @@ func (f *Fs) Precision() time.Duration {
 // nil and the error
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 	// fs.Debugf(f, "Trying to put file %s", src.Remote())
-	err := f.mkParentDir(src.Remote())
+	err := f.mkParentDir(ctx, src.Remote())
 	if err != nil {
 		return nil, errors.Wrap(err, "Put mkParentDir failed")
 	}
@@ -610,12 +613,12 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
 }
 // getInfo reads the FileInfo for a path
-func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {
+func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err error) {
 	// defer fs.Trace(remote, "")("fi=%v, err=%v", &fi, &err)
 	dir := path.Dir(remote)
 	base := path.Base(remote)
-	c, err := f.getFtpConnection()
+	c, err := f.getFtpConnection(ctx)
 	if err != nil {
 		return nil, errors.Wrap(err, "getInfo")
 	}
@@ -642,12 +645,12 @@ func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {
 }
 // mkdir makes the directory and parents using unrooted paths
-func (f *Fs) mkdir(abspath string) error {
+func (f *Fs) mkdir(ctx context.Context, abspath string) error {
 	abspath = path.Clean(abspath)
 	if abspath == "." || abspath == "/" {
 		return nil
 	}
-	fi, err := f.getInfo(abspath)
+	fi, err := f.getInfo(ctx, abspath)
 	if err == nil {
 		if fi.IsDir {
 			return nil
@@ -657,11 +660,11 @@ func (f *Fs) mkdir(abspath string) error {
 		return errors.Wrapf(err, "mkdir %q failed", abspath)
 	}
 	parent := path.Dir(abspath)
-	err = f.mkdir(parent)
+	err = f.mkdir(ctx, parent)
 	if err != nil {
 		return err
 	}
-	c, connErr := f.getFtpConnection()
+	c, connErr := f.getFtpConnection(ctx)
 	if connErr != nil {
 		return errors.Wrap(connErr, "mkdir")
 	}
@@ -681,23 +684,23 @@ func (f *Fs) mkdir(abspath string) error {
 // mkParentDir makes the parent of remote if necessary and any
 // directories above that
-func (f *Fs) mkParentDir(remote string) error {
+func (f *Fs) mkParentDir(ctx context.Context, remote string) error {
 	parent := path.Dir(remote)
-	return f.mkdir(path.Join(f.root, parent))
+	return f.mkdir(ctx, path.Join(f.root, parent))
 }
 // Mkdir creates the directory if it doesn't exist
 func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
 	// defer fs.Trace(dir, "")("err=%v", &err)
 	root := path.Join(f.root, dir)
-	return f.mkdir(root)
+	return f.mkdir(ctx, root)
 }
 // Rmdir removes the directory (container, bucket) if empty
 //
 // Return an error if it doesn't exist or isn't empty
 func (f *Fs) Rmdir(ctx context.Context, dir string) error {
-	c, err := f.getFtpConnection()
+	c, err := f.getFtpConnection(ctx)
 	if err != nil {
 		return errors.Wrap(translateErrorFile(err), "Rmdir")
 	}
@@ -713,11 +716,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		fs.Debugf(src, "Can't move - not same remote type")
 		return nil, fs.ErrorCantMove
 	}
-	err := f.mkParentDir(remote)
+	err := f.mkParentDir(ctx, remote)
 	if err != nil {
 		return nil, errors.Wrap(err, "Move mkParentDir failed")
 	}
-	c, err := f.getFtpConnection()
+	c, err := f.getFtpConnection(ctx)
 	if err != nil {
 		return nil, errors.Wrap(err, "Move")
 	}
@@ -754,7 +757,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	dstPath := path.Join(f.root, dstRemote)
 	// Check if destination exists
-	fi, err := f.getInfo(dstPath)
+	fi, err := f.getInfo(ctx, dstPath)
 	if err == nil {
 		if fi.IsDir {
 			return fs.ErrorDirExists
@@ -765,13 +768,13 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	}
 	// Make sure the parent directory exists
-	err = f.mkdir(path.Dir(dstPath))
+	err = f.mkdir(ctx, path.Dir(dstPath))
 	if err != nil {
 		return errors.Wrap(err, "DirMove mkParentDir dst failed")
 	}
 	// Do the move
-	c, err := f.getFtpConnection()
+	c, err := f.getFtpConnection(ctx)
 	if err != nil {
 		return errors.Wrap(err, "DirMove")
 	}
@@ -903,7 +906,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
 			}
 		}
 	}
-	c, err := o.fs.getFtpConnection()
+	c, err := o.fs.getFtpConnection(ctx)
 	if err != nil {
 		return nil, errors.Wrap(err, "open")
 	}
@@ -938,7 +941,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 			fs.Debugf(o, "Removed after failed upload: %v", err)
 		}
 	}
-	c, err := o.fs.getFtpConnection()
+	c, err := o.fs.getFtpConnection(ctx)
 	if err != nil {
 		return errors.Wrap(err, "Update")
 	}
@@ -950,7 +953,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return errors.Wrap(err, "update stor")
 	}
 	o.fs.putFtpConnection(&c, nil)
-	o.info, err = o.fs.getInfo(path)
+	o.info, err = o.fs.getInfo(ctx, path)
 	if err != nil {
 		return errors.Wrap(err, "update getinfo")
 	}
@@ -962,14 +965,14 @@ func (o *Object) Remove(ctx context.Context) (err error) {
 	// defer fs.Trace(o, "")("err=%v", &err)
 	path := path.Join(o.fs.root, o.remote)
 	// Check if it's a directory or a file
-	info, err := o.fs.getInfo(path)
+	info, err := o.fs.getInfo(ctx, path)
 	if err != nil {
 		return err
 	}
 	if info.IsDir {
 		err = o.fs.Rmdir(ctx, o.remote)
 	} else {
-		c, err := o.fs.getFtpConnection()
+		c, err := o.fs.getFtpConnection(ctx)
 		if err != nil {
 			return errors.Wrap(err, "Remove")
 		}

View File

@@ -375,7 +375,7 @@ func getServiceAccountClient(ctx context.Context, credentialsData []byte) (*http
 	if err != nil {
 		return nil, errors.Wrap(err, "error processing credentials")
 	}
-	ctxWithSpecialClient := oauthutil.Context(ctx, fshttp.NewClient(fs.Config))
+	ctxWithSpecialClient := oauthutil.Context(ctx, fshttp.NewClient(fs.GetConfig(ctx)))
 	return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
 }
@@ -432,7 +432,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		name:  name,
 		root:  root,
 		opt:   *opt,
-		pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
+		pacer: fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
 		cache: bucket.NewCache(),
 	}
 	f.setRoot(root)

View File

@@ -254,7 +254,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		return nil, err
 	}
-	baseClient := fshttp.NewClient(fs.Config)
+	baseClient := fshttp.NewClient(fs.GetConfig(ctx))
 	oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to configure Box")
@@ -272,7 +272,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		unAuth:    rest.NewClient(baseClient),
 		srv:       rest.NewClient(oAuthClient).SetRoot(rootURL),
 		ts:        ts,
-		pacer:     fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
+		pacer:     fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
 		startTime: time.Now(),
 		albums:    map[bool]*albums{},
 		uploaded:  dirtree.New(),

View File

@@ -115,8 +115,9 @@ type Options struct {
 type Fs struct {
 	name        string
 	root        string
 	features    *fs.Features   // optional features
 	opt         Options        // options for this backend
+	ci          *fs.ConfigInfo // global config
 	endpoint    *url.URL
 	endpointURL string // endpoint as a string
 	httpClient  *http.Client
@@ -171,7 +172,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		return nil, err
 	}
-	client := fshttp.NewClient(fs.Config)
+	client := fshttp.NewClient(fs.GetConfig(ctx))
 	var isFile = false
 	if !strings.HasSuffix(u.String(), "/") {
@@ -209,10 +210,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		return nil, err
 	}
+	ci := fs.GetConfig(ctx)
 	f := &Fs{
 		name:        name,
 		root:        root,
 		opt:         *opt,
+		ci:          ci,
 		httpClient:  client,
 		endpoint:    u,
 		endpointURL: u.String(),
@@ -439,14 +442,15 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	var (
 		entriesMu sync.Mutex // to protect entries
 		wg        sync.WaitGroup
-		in        = make(chan string, fs.Config.Checkers)
+		checkers  = f.ci.Checkers
+		in        = make(chan string, checkers)
 	)
 	add := func(entry fs.DirEntry) {
 		entriesMu.Lock()
 		entries = append(entries, entry)
 		entriesMu.Unlock()
 	}
-	for i := 0; i < fs.Config.Checkers; i++ {
+	for i := 0; i < checkers; i++ {
 		wg.Add(1)
 		go func() {
 			defer wg.Done()

View File

@@ -157,11 +157,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	}
 	// Make the swift Connection
+	ci := fs.GetConfig(ctx)
 	c := &swiftLib.Connection{
 		Auth:           newAuth(f),
-		ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
-		Timeout:        10 * fs.Config.Timeout,        // Use the timeouts in the transport
-		Transport:      fshttp.NewTransport(fs.Config),
+		ConnectTimeout: 10 * ci.ConnectTimeout, // Use the timeouts in the transport
+		Timeout:        10 * ci.Timeout,        // Use the timeouts in the transport
+		Transport:      fshttp.NewTransport(fs.GetConfig(ctx)),
 	}
 	err = c.Authenticate()
 	if err != nil {

View File

@@ -230,7 +230,7 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
 // v1config configure a jottacloud backend using legacy authentication
 func v1config(ctx context.Context, name string, m configmap.Mapper) {
-	srv := rest.NewClient(fshttp.NewClient(fs.Config))
+	srv := rest.NewClient(fshttp.NewClient(fs.GetConfig(ctx)))
 	fmt.Printf("\nDo you want to create a machine specific API key?\n\nRclone has it's own Jottacloud API KEY which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine it's recommended to create a machine specific API key. These keys can NOT be shared between machines.\n\n")
 	if config.Confirm(false) {
@@ -365,7 +365,7 @@ func doAuthV1(ctx context.Context, srv *rest.Client, username, password string)
 // v2config configure a jottacloud backend using the modern JottaCli token based authentication
 func v2config(ctx context.Context, name string, m configmap.Mapper) {
-	srv := rest.NewClient(fshttp.NewClient(fs.Config))
+	srv := rest.NewClient(fshttp.NewClient(fs.GetConfig(ctx)))
 	fmt.Printf("Generate a personal login token here: https://www.jottacloud.com/web/secure\n")
 	fmt.Printf("Login Token> ")
@@ -661,7 +661,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		return nil, errors.New("Outdated config - please reconfigure this backend")
 	}
-	baseClient := fshttp.NewClient(fs.Config)
+	baseClient := fshttp.NewClient(fs.GetConfig(ctx))
 	if ver == configVersion {
 		oauthConfig.ClientID = "jottacli"
@@ -711,7 +711,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		opt:    *opt,
 		srv:    rest.NewClient(oAuthClient).SetRoot(rootURL),
 		apiSrv: rest.NewClient(oAuthClient).SetRoot(apiURL),
-		pacer:  fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		pacer:  fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 	}
 	f.features = (&fs.Features{
 		CaseInsensitive: true,

View File

@@ -267,7 +267,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
 		return nil, err
 	}
 	httpClient := httpclient.New()
-	httpClient.Client = fshttp.NewClient(fs.Config)
+	httpClient.Client = fshttp.NewClient(fs.GetConfig(ctx))
 	client := koofrclient.NewKoofrClientWithHTTPClient(opt.Endpoint, httpClient)
 	basicAuth := fmt.Sprintf("Basic %s",
 		base64.StdEncoding.EncodeToString([]byte(opt.User+":"+pass)))

View File

@@ -273,6 +273,7 @@ type Fs struct {
 	name         string
 	root         string         // root path
 	opt          Options        // parsed options
+	ci           *fs.ConfigInfo // global config
 	speedupGlobs []string       // list of file name patterns eligible for speedup
 	speedupAny   bool           // true if all file names are eligible for speedup
 	features     *fs.Features   // optional features
@@ -312,10 +313,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	// However the f.root string should not have leading or trailing slashes
 	root = strings.Trim(root, "/")
+	ci := fs.GetConfig(ctx)
 	f := &Fs{
 		name: name,
 		root: root,
 		opt:  *opt,
+		ci:   ci,
 		m:    m,
 	}
@@ -324,7 +327,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	}
 	f.quirks.parseQuirks(opt.Quirks)
-	f.pacer = fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleepPacer), pacer.MaxSleep(maxSleepPacer), pacer.DecayConstant(decayConstPacer)))
+	f.pacer = fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleepPacer), pacer.MaxSleep(maxSleepPacer), pacer.DecayConstant(decayConstPacer)))
 	f.features = (&fs.Features{
 		CaseInsensitive: true,
@@ -335,7 +338,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	}).Fill(ctx, f)
 	// Override few config settings and create a client
-	clientConfig := *fs.Config
+	clientConfig := *fs.GetConfig(ctx)
 	if opt.UserAgent != "" {
 		clientConfig.UserAgent = opt.UserAgent
 	}
@@ -692,7 +695,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 		entries, err = f.listM1(ctx, f.absPath(dir), 0, maxInt32)
 	}
-	if err == nil && fs.Config.LogLevel >= fs.LogLevelDebug {
+	if err == nil && f.ci.LogLevel >= fs.LogLevelDebug {
 		names := []string{}
 		for _, entry := range entries {
 			names = append(names, entry.Remote())
@@ -956,7 +959,7 @@ func (t *treeState) NextRecord() (fs.DirEntry, error) {
 		return nil, r.Error()
 	}
-	if fs.Config.LogLevel >= fs.LogLevelDebug {
+	if t.f.ci.LogLevel >= fs.LogLevelDebug {
 		ctime, _ := modTime.MarshalJSON()
 		fs.Debugf(t.f, "binDir %d.%d %q %q (%d) %s", t.level, itemType, t.currDir, name, size, ctime)
 	}
@@ -2376,7 +2379,7 @@ func (p *serverPool) addServer(url string, now time.Time) {
 	expiry := now.Add(p.expirySec * time.Second)
 	expiryStr := []byte("-")
-	if fs.Config.LogLevel >= fs.LogLevelInfo {
+	if p.fs.ci.LogLevel >= fs.LogLevelInfo {
 		expiryStr, _ = expiry.MarshalJSON()
 	}

View File

@@ -194,6 +194,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 			return nil, errors.Wrap(err, "couldn't decrypt password")
 		}
 	}
+	ci := fs.GetConfig(ctx)
 	// cache *mega.Mega on username so we can re-use and share
 	// them between remotes. They are expensive to make as they
@@ -204,8 +205,8 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	defer megaCacheMu.Unlock()
 	srv := megaCache[opt.User]
 	if srv == nil {
-		srv = mega.New().SetClient(fshttp.NewClient(fs.Config))
-		srv.SetRetries(fs.Config.LowLevelRetries) // let mega do the low level retries
+		srv = mega.New().SetClient(fshttp.NewClient(fs.GetConfig(ctx)))
+		srv.SetRetries(ci.LowLevelRetries) // let mega do the low level retries
 		srv.SetLogger(func(format string, v ...interface{}) {
 			fs.Infof("*go-mega*", format, v...)
 		})
@@ -228,7 +229,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		root:  root,
 		opt:   *opt,
 		srv:   srv,
-		pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 	}
 	f.features = (&fs.Features{
 		DuplicateFiles: true,

View File

@@ -81,6 +81,7 @@ func init() {
 		Description: "Microsoft OneDrive",
 		NewFs:       NewFs,
 		Config: func(ctx context.Context, name string, m configmap.Mapper) {
+			ci := fs.GetConfig(ctx)
 			err := oauthutil.Config(ctx, "onedrive", name, m, oauthConfig, nil)
 			if err != nil {
 				log.Fatalf("Failed to configure token: %v", err)
@@ -88,7 +89,7 @@ func init() {
 			}
 			// Stop if we are running non-interactive config
-			if fs.Config.AutoConfirm {
+			if ci.AutoConfirm {
 				return
 			}
@@ -363,6 +364,7 @@ type Fs struct {
 	name     string             // name of this remote
 	root     string             // the path we are working on
 	opt      Options            // parsed options
+	ci       *fs.ConfigInfo     // global config
 	features *fs.Features       // optional features
 	srv      *rest.Client       // the connection to the one drive server
 	dirCache *dircache.DirCache // Map of directory path to directory id
@@ -618,14 +620,16 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		return nil, errors.Wrap(err, "failed to configure OneDrive")
 	}
+	ci := fs.GetConfig(ctx)
 	f := &Fs{
 		name:      name,
 		root:      root,
 		opt:       *opt,
+		ci:        ci,
 		driveID:   opt.DriveID,
 		driveType: opt.DriveType,
 		srv:       rest.NewClient(oAuthClient).SetRoot(graphURL + "/drives/" + opt.DriveID),
-		pacer:     fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		pacer:     fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 	}
 	f.features = (&fs.Features{
 		CaseInsensitive: true,
@@ -971,7 +975,7 @@ func (f *Fs) Precision() time.Duration {
 // waitForJob waits for the job with status in url to complete
 func (f *Fs) waitForJob(ctx context.Context, location string, o *Object) error {
-	deadline := time.Now().Add(fs.Config.Timeout)
+	deadline := time.Now().Add(f.ci.Timeout)
 	for time.Now().Before(deadline) {
 		var resp *http.Response
 		var err error
@@ -1007,7 +1011,7 @@ func (f *Fs) waitForJob(ctx context.Context, location string, o *Object) error {
 		time.Sleep(1 * time.Second)
 	}
-	return errors.Errorf("async operation didn't complete after %v", fs.Config.Timeout)
+	return errors.Errorf("async operation didn't complete after %v", f.ci.Timeout)
 }
 // Copy src to this remote using server-side copy operations.
@@ -1300,7 +1304,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 // CleanUp deletes all the hidden files.
 func (f *Fs) CleanUp(ctx context.Context) error {
-	token := make(chan struct{}, fs.Config.Checkers)
+	token := make(chan struct{}, f.ci.Checkers)
 	var wg sync.WaitGroup
 	err := walk.Walk(ctx, f, "", true, -1, func(path string, entries fs.DirEntries, err error) error {
 		err = entries.ForObjectError(func(obj fs.Object) error {

View File

@@ -187,8 +187,8 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		name:  name,
 		root:  root,
 		opt:   *opt,
-		srv:   rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
-		pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		srv:   rest.NewClient(fshttp.NewClient(fs.GetConfig(ctx))).SetErrorHandler(errorHandler),
+		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 	}
 	f.dirCache = dircache.New(root, "0", f)

View File

@@ -299,7 +299,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		root:  root,
 		opt:   *opt,
 		srv:   rest.NewClient(oAuthClient).SetRoot("https://" + opt.Hostname),
-		pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 	}
 	f.features = (&fs.Features{
 		CaseInsensitive: false,

View File

@@ -252,7 +252,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 			return nil, errors.Wrap(err, "failed to configure premiumize.me")
 		}
 	} else {
-		client = fshttp.NewClient(fs.Config)
+		client = fshttp.NewClient(fs.GetConfig(ctx))
 	}
 	f := &Fs{
@@ -260,7 +260,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		root:  root,
 		opt:   *opt,
 		srv:   rest.NewClient(client).SetRoot(rootURL),
-		pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 	}
 	f.features = (&fs.Features{
 		CaseInsensitive: true,

View File

@@ -77,7 +77,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (f fs.Fs,
 		return nil, err
 	}
 	root = parsePath(root)
-	httpClient := fshttp.NewClient(fs.Config)
+	httpClient := fshttp.NewClient(fs.GetConfig(ctx))
 	oAuthClient, _, err := oauthutil.NewClientWithBaseClient(ctx, name, m, putioConfig, httpClient)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to configure putio")
@@ -86,7 +86,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (f fs.Fs,
 		name:        name,
 		root:        root,
 		opt:         *opt,
-		pacer:       fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		pacer:       fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 		client:      putio.NewClient(oAuthClient),
 		httpClient:  httpClient,
 		oAuthClient: oAuthClient,

View File

@ -228,7 +228,7 @@ func qsParseEndpoint(endpoint string) (protocol, host, port string, err error) {
} }
// qsServiceConnection makes a connection to qingstor // qsServiceConnection makes a connection to qingstor
func qsServiceConnection(opt *Options) (*qs.Service, error) { func qsServiceConnection(ctx context.Context, opt *Options) (*qs.Service, error) {
accessKeyID := opt.AccessKeyID accessKeyID := opt.AccessKeyID
secretAccessKey := opt.SecretAccessKey secretAccessKey := opt.SecretAccessKey
@ -277,7 +277,7 @@ func qsServiceConnection(opt *Options) (*qs.Service, error) {
cf.Host = host cf.Host = host
cf.Port = port cf.Port = port
// unsupported in v3.1: cf.ConnectionRetries = opt.ConnectionRetries // unsupported in v3.1: cf.ConnectionRetries = opt.ConnectionRetries
cf.Connection = fshttp.NewClient(fs.Config) cf.Connection = fshttp.NewClient(fs.GetConfig(ctx))
return qs.Init(cf) return qs.Init(cf)
} }
@ -334,7 +334,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil { if err != nil {
return nil, errors.Wrap(err, "qingstor: upload cutoff") return nil, errors.Wrap(err, "qingstor: upload cutoff")
} }
svc, err := qsServiceConnection(opt) svc, err := qsServiceConnection(ctx, opt)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -1285,6 +1285,8 @@ type Fs struct {
name string // the name of the remote name string // the name of the remote
root string // root of the bucket - ignore all objects above this root string // root of the bucket - ignore all objects above this
opt Options // parsed options opt Options // parsed options
ci *fs.ConfigInfo // global config
ctx context.Context // global context for reading config
features *fs.Features // optional features features *fs.Features // optional features
c *s3.S3 // the connection to the s3 server c *s3.S3 // the connection to the s3 server
ses *session.Session // the s3 session ses *session.Session // the s3 session
@ -1401,9 +1403,9 @@ func (o *Object) split() (bucket, bucketPath string) {
} }
// getClient makes an http client according to the options // getClient makes an http client according to the options
func getClient(opt *Options) *http.Client { func getClient(ctx context.Context, opt *Options) *http.Client {
// TODO: Do we need cookies too? // TODO: Do we need cookies too?
t := fshttp.NewTransportCustom(fs.Config, func(t *http.Transport) { t := fshttp.NewTransportCustom(fs.GetConfig(ctx), func(t *http.Transport) {
if opt.DisableHTTP2 { if opt.DisableHTTP2 {
t.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{} t.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{}
} }
@ -1414,7 +1416,7 @@ func getClient(opt *Options) *http.Client {
} }
// s3Connection makes a connection to s3 // s3Connection makes a connection to s3
func s3Connection(opt *Options) (*s3.S3, *session.Session, error) { func s3Connection(ctx context.Context, opt *Options) (*s3.S3, *session.Session, error) {
// Make the auth // Make the auth
v := credentials.Value{ v := credentials.Value{
AccessKeyID: opt.AccessKeyID, AccessKeyID: opt.AccessKeyID,
@ -1492,7 +1494,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
awsConfig := aws.NewConfig(). awsConfig := aws.NewConfig().
WithMaxRetries(0). // Rely on rclone's retry logic WithMaxRetries(0). // Rely on rclone's retry logic
WithCredentials(cred). WithCredentials(cred).
WithHTTPClient(getClient(opt)). WithHTTPClient(getClient(ctx, opt)).
WithS3ForcePathStyle(opt.ForcePathStyle). WithS3ForcePathStyle(opt.ForcePathStyle).
WithS3UseAccelerate(opt.UseAccelerateEndpoint). WithS3UseAccelerate(opt.UseAccelerateEndpoint).
WithS3UsEast1RegionalEndpoint(endpoints.RegionalS3UsEast1Endpoint) WithS3UsEast1RegionalEndpoint(endpoints.RegionalS3UsEast1Endpoint)
@ -1599,23 +1601,26 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
md5sumBinary := md5.Sum([]byte(opt.SSECustomerKey)) md5sumBinary := md5.Sum([]byte(opt.SSECustomerKey))
opt.SSECustomerKeyMD5 = base64.StdEncoding.EncodeToString(md5sumBinary[:]) opt.SSECustomerKeyMD5 = base64.StdEncoding.EncodeToString(md5sumBinary[:])
} }
c, ses, err := s3Connection(opt) c, ses, err := s3Connection(ctx, opt)
if err != nil { if err != nil {
return nil, err return nil, err
} }
ci := fs.GetConfig(ctx)
f := &Fs{ f := &Fs{
name: name, name: name,
opt: *opt, opt: *opt,
ci: ci,
ctx: ctx,
c: c, c: c,
ses: ses, ses: ses,
pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))), pacer: fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep))),
cache: bucket.NewCache(), cache: bucket.NewCache(),
srv: getClient(opt), srv: getClient(ctx, opt),
pool: pool.New( pool: pool.New(
time.Duration(opt.MemoryPoolFlushTime), time.Duration(opt.MemoryPoolFlushTime),
int(opt.ChunkSize), int(opt.ChunkSize),
opt.UploadConcurrency*fs.Config.Transfers, opt.UploadConcurrency*ci.Transfers,
opt.MemoryPoolUseMmap, opt.MemoryPoolUseMmap,
), ),
} }
@ -1728,7 +1733,7 @@ func (f *Fs) updateRegionForBucket(bucket string) error {
// Make a new session with the new region // Make a new session with the new region
oldRegion := f.opt.Region oldRegion := f.opt.Region
f.opt.Region = region f.opt.Region = region
c, ses, err := s3Connection(&f.opt) c, ses, err := s3Connection(f.ctx, &f.opt)
if err != nil { if err != nil {
return errors.Wrap(err, "creating new session failed") return errors.Wrap(err, "creating new session failed")
} }
@ -2343,7 +2348,7 @@ func (f *Fs) getMemoryPool(size int64) *pool.Pool {
return pool.New( return pool.New(
time.Duration(f.opt.MemoryPoolFlushTime), time.Duration(f.opt.MemoryPoolFlushTime),
int(size), int(size),
f.opt.UploadConcurrency*fs.Config.Transfers, f.opt.UploadConcurrency*f.ci.Transfers,
f.opt.MemoryPoolUseMmap, f.opt.MemoryPoolUseMmap,
) )
} }
@ -2810,7 +2815,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
// It attempts to read the object's mtime and if that isn't present the // It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers // LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time { func (o *Object) ModTime(ctx context.Context) time.Time {
if fs.Config.UseServerModTime { if o.fs.ci.UseServerModTime {
return o.lastModified return o.lastModified
} }
err := o.readMetaData(ctx) err := o.readMetaData(ctx)
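The s3 hunks add two fields to the Fs: the config itself and, unusually, the constructing context. A sketch of why (names are illustrative; only `fs.ConfigInfo`, `fs.GetConfig` and `UseServerModTime` come from the commit):

```go
// Sketch: capture the config (and, as a stopgap, the constructing
// context) on the Fs so methods without a ctx parameter, such as
// updateRegionForBucket above, can still reach the right config.
package sketch

import (
	"context"
	"time"

	"github.com/rclone/rclone/fs"
)

type objFs struct {
	ci  *fs.ConfigInfo  // config captured at construction
	ctx context.Context // kept only for methods that lack a ctx argument
}

type obj struct {
	fs           *objFs
	lastModified time.Time
}

func newObjFs(ctx context.Context) *objFs {
	return &objFs{ci: fs.GetConfig(ctx), ctx: ctx}
}

// ModTime prefers the server time when the captured config asks for it.
func (o *obj) ModTime(ctx context.Context) time.Time {
	if o.fs.ci.UseServerModTime {
		return o.lastModified
	}
	// ... otherwise read per-object metadata as the real backend does
	return time.Now()
}
```

Storing a context on a struct is normally discouraged in Go; here it reads as a transitional bridge until every method gains a ctx parameter.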

View File

@ -1,6 +1,7 @@
package seafile package seafile
import ( import (
"context"
"fmt" "fmt"
"net/url" "net/url"
"sync" "sync"
@ -27,7 +28,7 @@ func init() {
} }
// getPacer returns the unique pacer for that remote URL // getPacer returns the unique pacer for that remote URL
func getPacer(remote string) *fs.Pacer { func getPacer(ctx context.Context, remote string) *fs.Pacer {
pacerMutex.Lock() pacerMutex.Lock()
defer pacerMutex.Unlock() defer pacerMutex.Unlock()
@ -37,6 +38,7 @@ func getPacer(remote string) *fs.Pacer {
} }
pacers[remote] = fs.NewPacer( pacers[remote] = fs.NewPacer(
ctx,
pacer.NewDefault( pacer.NewDefault(
pacer.MinSleep(minSleep), pacer.MinSleep(minSleep),
pacer.MaxSleep(maxSleep), pacer.MaxSleep(maxSleep),
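The seafile change threads ctx into its per-remote pacer cache. A minimal sketch of that memoisation, assuming the same `fs.NewPacer(ctx, ...)` signature the hunk uses:

```go
// Sketch: one pacer per remote URL, created on first use with the
// caller's context, as getPacer above does.
package sketch

import (
	"context"
	"sync"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/pacer"
)

var (
	pacerMu sync.Mutex
	pacers  = map[string]*fs.Pacer{}
)

func getPacerSketch(ctx context.Context, remote string) *fs.Pacer {
	pacerMu.Lock()
	defer pacerMu.Unlock()
	if p, ok := pacers[remote]; ok {
		return p // reuse the existing pacer for this URL
	}
	p := fs.NewPacer(ctx, pacer.NewDefault(
		pacer.MinSleep(10*time.Millisecond),
		pacer.MaxSleep(2*time.Second),
	))
	pacers[remote] = p
	return p
}
```

Sharing one pacer per URL keeps rate limiting global to a server even when several Fs instances point at it.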

View File

@ -197,8 +197,8 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
opt: *opt, opt: *opt,
endpoint: u, endpoint: u,
endpointURL: u.String(), endpointURL: u.String(),
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetRoot(u.String()), srv: rest.NewClient(fshttp.NewClient(fs.GetConfig(ctx))).SetRoot(u.String()),
pacer: getPacer(opt.URL), pacer: getPacer(ctx, opt.URL),
} }
f.features = (&fs.Features{ f.features = (&fs.Features{
CanHaveEmptyDirectories: true, CanHaveEmptyDirectories: true,
@ -297,6 +297,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// Config callback for 2FA // Config callback for 2FA
func Config(ctx context.Context, name string, m configmap.Mapper) { func Config(ctx context.Context, name string, m configmap.Mapper) {
ci := fs.GetConfig(ctx)
serverURL, ok := m.Get(configURL) serverURL, ok := m.Get(configURL)
if !ok || serverURL == "" { if !ok || serverURL == "" {
// If there's no server URL, it means we're trying an operation at the backend level, like a "rclone authorize seafile" // If there's no server URL, it means we're trying an operation at the backend level, like a "rclone authorize seafile"
@ -305,7 +306,7 @@ func Config(ctx context.Context, name string, m configmap.Mapper) {
} }
// Stop if we are running non-interactive config // Stop if we are running non-interactive config
if fs.Config.AutoConfirm { if ci.AutoConfirm {
return return
} }
@ -342,7 +343,7 @@ func Config(ctx context.Context, name string, m configmap.Mapper) {
if !strings.HasPrefix(url, "/") { if !strings.HasPrefix(url, "/") {
url += "/" url += "/"
} }
srv := rest.NewClient(fshttp.NewClient(fs.Config)).SetRoot(url) srv := rest.NewClient(fshttp.NewClient(fs.GetConfig(ctx))).SetRoot(url)
// We loop asking for a 2FA code // We loop asking for a 2FA code
for { for {
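The 2FA config callback now reads `AutoConfirm` from the context's config before prompting. A short sketch of that guard (the prompt body is invented):

```go
// Sketch: interactive config steps consult AutoConfirm from the ctx
// config instead of the global, so non-interactive runs skip prompts.
package sketch

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/fs"
)

func configSketch(ctx context.Context) {
	if fs.GetConfig(ctx).AutoConfirm {
		return // non-interactive: don't prompt for a 2FA code
	}
	fmt.Println("enter 2FA code:")
	// ... prompt loop would go here
}
```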

View File

@ -226,6 +226,7 @@ type Fs struct {
root string root string
absRoot string absRoot string
opt Options // parsed options opt Options // parsed options
ci *fs.ConfigInfo // global config
m configmap.Mapper // config m configmap.Mapper // config
features *fs.Features // optional features features *fs.Features // optional features
config *ssh.ClientConfig config *ssh.ClientConfig
@ -252,8 +253,8 @@ type Object struct {
// dial starts a client connection to the given SSH server. It is a // dial starts a client connection to the given SSH server. It is a
// convenience function that connects to the given network address, // convenience function that connects to the given network address,
// initiates the SSH handshake, and then sets up a Client. // initiates the SSH handshake, and then sets up a Client.
func (f *Fs) dial(network, addr string, sshConfig *ssh.ClientConfig) (*ssh.Client, error) { func (f *Fs) dial(ctx context.Context, network, addr string, sshConfig *ssh.ClientConfig) (*ssh.Client, error) {
dialer := fshttp.NewDialer(fs.Config) dialer := fshttp.NewDialer(fs.GetConfig(ctx))
conn, err := dialer.Dial(network, addr) conn, err := dialer.Dial(network, addr)
if err != nil { if err != nil {
return nil, err return nil, err
@ -299,12 +300,12 @@ func (c *conn) closed() error {
} }
// Open a new connection to the SFTP server. // Open a new connection to the SFTP server.
func (f *Fs) sftpConnection() (c *conn, err error) { func (f *Fs) sftpConnection(ctx context.Context) (c *conn, err error) {
// Rate limit rate of new connections // Rate limit rate of new connections
c = &conn{ c = &conn{
err: make(chan error, 1), err: make(chan error, 1),
} }
c.sshClient, err = f.dial("tcp", f.opt.Host+":"+f.opt.Port, f.config) c.sshClient, err = f.dial(ctx, "tcp", f.opt.Host+":"+f.opt.Port, f.config)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "couldn't connect SSH") return nil, errors.Wrap(err, "couldn't connect SSH")
} }
@ -347,7 +348,7 @@ func (f *Fs) newSftpClient(conn *ssh.Client, opts ...sftp.ClientOption) (*sftp.C
} }
// Get an SFTP connection from the pool, or open a new one // Get an SFTP connection from the pool, or open a new one
func (f *Fs) getSftpConnection() (c *conn, err error) { func (f *Fs) getSftpConnection(ctx context.Context) (c *conn, err error) {
f.poolMu.Lock() f.poolMu.Lock()
for len(f.pool) > 0 { for len(f.pool) > 0 {
c = f.pool[0] c = f.pool[0]
@ -364,7 +365,7 @@ func (f *Fs) getSftpConnection() (c *conn, err error) {
return c, nil return c, nil
} }
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
c, err = f.sftpConnection() c, err = f.sftpConnection(ctx)
if err != nil { if err != nil {
return true, err return true, err
} }
@ -417,7 +418,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// This will hold the Fs object. We need to create it here // This will hold the Fs object. We need to create it here
// so we can refer to it in the SSH callback, but it's populated // so we can refer to it in the SSH callback, but it's populated
// in NewFsWithConnection // in NewFsWithConnection
f := &Fs{} f := &Fs{
ci: fs.GetConfig(ctx),
}
// Parse config into Options struct // Parse config into Options struct
opt := new(Options) opt := new(Options)
err := configstruct.Set(m, opt) err := configstruct.Set(m, opt)
@ -435,8 +438,8 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
User: opt.User, User: opt.User,
Auth: []ssh.AuthMethod{}, Auth: []ssh.AuthMethod{},
HostKeyCallback: ssh.InsecureIgnoreHostKey(), HostKeyCallback: ssh.InsecureIgnoreHostKey(),
Timeout: fs.Config.ConnectTimeout, Timeout: f.ci.ConnectTimeout,
ClientVersion: "SSH-2.0-" + fs.Config.UserAgent, ClientVersion: "SSH-2.0-" + f.ci.UserAgent,
} }
if opt.KnownHostsFile != "" { if opt.KnownHostsFile != "" {
@ -603,7 +606,7 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
f.config = sshConfig f.config = sshConfig
f.url = "sftp://" + opt.User + "@" + opt.Host + ":" + opt.Port + "/" + root f.url = "sftp://" + opt.User + "@" + opt.Host + ":" + opt.Port + "/" + root
f.mkdirLock = newStringLock() f.mkdirLock = newStringLock()
f.pacer = fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))) f.pacer = fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant)))
f.savedpswd = "" f.savedpswd = ""
f.features = (&fs.Features{ f.features = (&fs.Features{
@ -611,7 +614,7 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
SlowHash: true, SlowHash: true,
}).Fill(ctx, f) }).Fill(ctx, f)
// Make a connection and pool it to return errors early // Make a connection and pool it to return errors early
c, err := f.getSftpConnection() c, err := f.getSftpConnection(ctx)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "NewFs") return nil, errors.Wrap(err, "NewFs")
} }
@ -679,7 +682,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
fs: f, fs: f,
remote: remote, remote: remote,
} }
err := o.stat() err := o.stat(ctx)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -688,11 +691,11 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// dirExists returns true, nil if the directory exists, false, nil if // dirExists returns true, nil if the directory exists, false, nil if
// it doesn't or false, err // it doesn't or false, err
func (f *Fs) dirExists(dir string) (bool, error) { func (f *Fs) dirExists(ctx context.Context, dir string) (bool, error) {
if dir == "" { if dir == "" {
dir = "." dir = "."
} }
c, err := f.getSftpConnection() c, err := f.getSftpConnection(ctx)
if err != nil { if err != nil {
return false, errors.Wrap(err, "dirExists") return false, errors.Wrap(err, "dirExists")
} }
@ -721,7 +724,7 @@ func (f *Fs) dirExists(dir string) (bool, error) {
// found. // found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
root := path.Join(f.absRoot, dir) root := path.Join(f.absRoot, dir)
ok, err := f.dirExists(root) ok, err := f.dirExists(ctx, root)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "List failed") return nil, errors.Wrap(err, "List failed")
} }
@ -732,7 +735,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if sftpDir == "" { if sftpDir == "" {
sftpDir = "." sftpDir = "."
} }
c, err := f.getSftpConnection() c, err := f.getSftpConnection(ctx)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "List") return nil, errors.Wrap(err, "List")
} }
@ -751,7 +754,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
continue continue
} }
oldInfo := info oldInfo := info
info, err = f.stat(remote) info, err = f.stat(ctx, remote)
if err != nil { if err != nil {
if !os.IsNotExist(err) { if !os.IsNotExist(err) {
fs.Errorf(remote, "stat of non-regular file failed: %v", err) fs.Errorf(remote, "stat of non-regular file failed: %v", err)
@ -776,7 +779,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Put data from <in> into a new remote sftp file object described by <src.Remote()> and <src.ModTime(ctx)> // Put data from <in> into a new remote sftp file object described by <src.Remote()> and <src.ModTime(ctx)>
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
err := f.mkParentDir(src.Remote()) err := f.mkParentDir(ctx, src.Remote())
if err != nil { if err != nil {
return nil, errors.Wrap(err, "Put mkParentDir failed") return nil, errors.Wrap(err, "Put mkParentDir failed")
} }
@ -799,19 +802,19 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
// mkParentDir makes the parent of remote if necessary and any // mkParentDir makes the parent of remote if necessary and any
// directories above that // directories above that
func (f *Fs) mkParentDir(remote string) error { func (f *Fs) mkParentDir(ctx context.Context, remote string) error {
parent := path.Dir(remote) parent := path.Dir(remote)
return f.mkdir(path.Join(f.absRoot, parent)) return f.mkdir(ctx, path.Join(f.absRoot, parent))
} }
// mkdir makes the directory and parents using native paths // mkdir makes the directory and parents using native paths
func (f *Fs) mkdir(dirPath string) error { func (f *Fs) mkdir(ctx context.Context, dirPath string) error {
f.mkdirLock.Lock(dirPath) f.mkdirLock.Lock(dirPath)
defer f.mkdirLock.Unlock(dirPath) defer f.mkdirLock.Unlock(dirPath)
if dirPath == "." || dirPath == "/" { if dirPath == "." || dirPath == "/" {
return nil return nil
} }
ok, err := f.dirExists(dirPath) ok, err := f.dirExists(ctx, dirPath)
if err != nil { if err != nil {
return errors.Wrap(err, "mkdir dirExists failed") return errors.Wrap(err, "mkdir dirExists failed")
} }
@ -819,11 +822,11 @@ func (f *Fs) mkdir(dirPath string) error {
return nil return nil
} }
parent := path.Dir(dirPath) parent := path.Dir(dirPath)
err = f.mkdir(parent) err = f.mkdir(ctx, parent)
if err != nil { if err != nil {
return err return err
} }
c, err := f.getSftpConnection() c, err := f.getSftpConnection(ctx)
if err != nil { if err != nil {
return errors.Wrap(err, "mkdir") return errors.Wrap(err, "mkdir")
} }
@ -838,7 +841,7 @@ func (f *Fs) mkdir(dirPath string) error {
// Mkdir makes the root directory of the Fs object // Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error { func (f *Fs) Mkdir(ctx context.Context, dir string) error {
root := path.Join(f.absRoot, dir) root := path.Join(f.absRoot, dir)
return f.mkdir(root) return f.mkdir(ctx, root)
} }
// Rmdir removes the root directory of the Fs object // Rmdir removes the root directory of the Fs object
@ -854,7 +857,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
} }
// Remove the directory // Remove the directory
root := path.Join(f.absRoot, dir) root := path.Join(f.absRoot, dir)
c, err := f.getSftpConnection() c, err := f.getSftpConnection(ctx)
if err != nil { if err != nil {
return errors.Wrap(err, "Rmdir") return errors.Wrap(err, "Rmdir")
} }
@ -870,11 +873,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Debugf(src, "Can't move - not same remote type") fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove return nil, fs.ErrorCantMove
} }
err := f.mkParentDir(remote) err := f.mkParentDir(ctx, remote)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "Move mkParentDir failed") return nil, errors.Wrap(err, "Move mkParentDir failed")
} }
c, err := f.getSftpConnection() c, err := f.getSftpConnection(ctx)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "Move") return nil, errors.Wrap(err, "Move")
} }
@ -911,7 +914,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
dstPath := path.Join(f.absRoot, dstRemote) dstPath := path.Join(f.absRoot, dstRemote)
// Check if destination exists // Check if destination exists
ok, err := f.dirExists(dstPath) ok, err := f.dirExists(ctx, dstPath)
if err != nil { if err != nil {
return errors.Wrap(err, "DirMove dirExists dst failed") return errors.Wrap(err, "DirMove dirExists dst failed")
} }
@ -920,13 +923,13 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
} }
// Make sure the parent directory exists // Make sure the parent directory exists
err = f.mkdir(path.Dir(dstPath)) err = f.mkdir(ctx, path.Dir(dstPath))
if err != nil { if err != nil {
return errors.Wrap(err, "DirMove mkParentDir dst failed") return errors.Wrap(err, "DirMove mkParentDir dst failed")
} }
// Do the move // Do the move
c, err := f.getSftpConnection() c, err := f.getSftpConnection(ctx)
if err != nil { if err != nil {
return errors.Wrap(err, "DirMove") return errors.Wrap(err, "DirMove")
} }
@ -942,8 +945,8 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
} }
// run runs cmd on the remote end returning standard output // run runs cmd on the remote end returning standard output
func (f *Fs) run(cmd string) ([]byte, error) { func (f *Fs) run(ctx context.Context, cmd string) ([]byte, error) {
c, err := f.getSftpConnection() c, err := f.getSftpConnection(ctx)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "run: get SFTP connection") return nil, errors.Wrap(err, "run: get SFTP connection")
} }
@ -971,6 +974,7 @@ func (f *Fs) run(cmd string) ([]byte, error) {
// Hashes returns the supported hash types of the filesystem // Hashes returns the supported hash types of the filesystem
func (f *Fs) Hashes() hash.Set { func (f *Fs) Hashes() hash.Set {
ctx := context.TODO()
if f.opt.DisableHashCheck { if f.opt.DisableHashCheck {
return hash.Set(hash.None) return hash.Set(hash.None)
} }
@ -989,7 +993,7 @@ func (f *Fs) Hashes() hash.Set {
} }
*changed = true *changed = true
for _, command := range commands { for _, command := range commands {
output, err := f.run(command) output, err := f.run(ctx, command)
if err != nil { if err != nil {
continue continue
} }
@ -1034,7 +1038,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
if len(escapedPath) == 0 { if len(escapedPath) == 0 {
escapedPath = "/" escapedPath = "/"
} }
stdout, err := f.run("df -k " + escapedPath) stdout, err := f.run(ctx, "df -k "+escapedPath)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "your remote may not support About") return nil, errors.Wrap(err, "your remote may not support About")
} }
@ -1097,7 +1101,7 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
return "", hash.ErrUnsupported return "", hash.ErrUnsupported
} }
c, err := o.fs.getSftpConnection() c, err := o.fs.getSftpConnection(ctx)
if err != nil { if err != nil {
return "", errors.Wrap(err, "Hash get SFTP connection") return "", errors.Wrap(err, "Hash get SFTP connection")
} }
@ -1205,8 +1209,8 @@ func (o *Object) setMetadata(info os.FileInfo) {
} }
// stat stats the file or directory at the remote given // stat stats the file or directory at the remote given
func (f *Fs) stat(remote string) (info os.FileInfo, err error) { func (f *Fs) stat(ctx context.Context, remote string) (info os.FileInfo, err error) {
c, err := f.getSftpConnection() c, err := f.getSftpConnection(ctx)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "stat") return nil, errors.Wrap(err, "stat")
} }
@ -1217,8 +1221,8 @@ func (f *Fs) stat(remote string) (info os.FileInfo, err error) {
} }
// stat updates the info in the Object // stat updates the info in the Object
func (o *Object) stat() error { func (o *Object) stat(ctx context.Context) error {
info, err := o.fs.stat(o.remote) info, err := o.fs.stat(ctx, o.remote)
if err != nil { if err != nil {
if os.IsNotExist(err) { if os.IsNotExist(err) {
return fs.ErrorObjectNotFound return fs.ErrorObjectNotFound
@ -1237,7 +1241,7 @@ func (o *Object) stat() error {
// it also updates the info field // it also updates the info field
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
if o.fs.opt.SetModTime { if o.fs.opt.SetModTime {
c, err := o.fs.getSftpConnection() c, err := o.fs.getSftpConnection(ctx)
if err != nil { if err != nil {
return errors.Wrap(err, "SetModTime") return errors.Wrap(err, "SetModTime")
} }
@ -1247,7 +1251,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return errors.Wrap(err, "SetModTime failed") return errors.Wrap(err, "SetModTime failed")
} }
} }
err := o.stat() err := o.stat(ctx)
if err != nil { if err != nil {
return errors.Wrap(err, "SetModTime stat failed") return errors.Wrap(err, "SetModTime stat failed")
} }
@ -1320,7 +1324,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
} }
} }
} }
c, err := o.fs.getSftpConnection() c, err := o.fs.getSftpConnection(ctx)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "Open") return nil, errors.Wrap(err, "Open")
} }
@ -1344,7 +1348,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Clear the hash cache since we are about to update the object // Clear the hash cache since we are about to update the object
o.md5sum = nil o.md5sum = nil
o.sha1sum = nil o.sha1sum = nil
c, err := o.fs.getSftpConnection() c, err := o.fs.getSftpConnection(ctx)
if err != nil { if err != nil {
return errors.Wrap(err, "Update") return errors.Wrap(err, "Update")
} }
@ -1355,7 +1359,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
} }
// remove the file if upload failed // remove the file if upload failed
remove := func() { remove := func() {
c, removeErr := o.fs.getSftpConnection() c, removeErr := o.fs.getSftpConnection(ctx)
if removeErr != nil { if removeErr != nil {
fs.Debugf(src, "Failed to open new SSH connection for delete: %v", removeErr) fs.Debugf(src, "Failed to open new SSH connection for delete: %v", removeErr)
return return
@ -1387,7 +1391,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Remove a remote sftp file object // Remove a remote sftp file object
func (o *Object) Remove(ctx context.Context) error { func (o *Object) Remove(ctx context.Context) error {
c, err := o.fs.getSftpConnection() c, err := o.fs.getSftpConnection(ctx)
if err != nil { if err != nil {
return errors.Wrap(err, "Remove") return errors.Wrap(err, "Remove")
} }
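The sftp diff is the most mechanical of the lot: every helper between a public method and the dialer gains a ctx parameter so the per-call config and cancellation reach the connection pool. `Hashes()` has no ctx in the `fs.Fs` interface, hence the `context.TODO()` stopgap above. A compressed sketch of the plumbing (all types invented):

```go
// Sketch: ctx flows from the public method down to the dialer.
package sketch

import "context"

type connSketch struct{}

type fsSketch struct{}

func (f *fsSketch) getConn(ctx context.Context) (*connSketch, error) {
	// would dial using fs.GetConfig(ctx) timeouts
	return &connSketch{}, nil
}

func (f *fsSketch) stat(ctx context.Context, remote string) error {
	_, err := f.getConn(ctx) // every hop now carries ctx
	return err
}

func (f *fsSketch) Hashes() error {
	ctx := context.TODO() // interface method without ctx: temporary bridge
	return f.stat(ctx, ".")
}
```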

View File

@ -237,6 +237,7 @@ type Fs struct {
name string // name of this remote name string // name of this remote
root string // the path we are working on root string // the path we are working on
opt Options // parsed options opt Options // parsed options
ci *fs.ConfigInfo // global config
features *fs.Features // optional features features *fs.Features // optional features
srv *rest.Client // the connection to the server srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id dirCache *dircache.DirCache // Map of directory path to directory id
@ -441,12 +442,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, errors.Wrap(err, "failed to configure sharefile") return nil, errors.Wrap(err, "failed to configure sharefile")
} }
ci := fs.GetConfig(ctx)
f := &Fs{ f := &Fs{
name: name, name: name,
root: root, root: root,
opt: *opt, opt: *opt,
ci: ci,
srv: rest.NewClient(client).SetRoot(opt.Endpoint + apiPath), srv: rest.NewClient(client).SetRoot(opt.Endpoint + apiPath),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
} }
f.features = (&fs.Features{ f.features = (&fs.Features{
CaseInsensitive: true, CaseInsensitive: true,
@ -531,8 +534,8 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// Fill up (or reset) the buffer tokens // Fill up (or reset) the buffer tokens
func (f *Fs) fillBufferTokens() { func (f *Fs) fillBufferTokens() {
f.bufferTokens = make(chan []byte, fs.Config.Transfers) f.bufferTokens = make(chan []byte, f.ci.Transfers)
for i := 0; i < fs.Config.Transfers; i++ { for i := 0; i < f.ci.Transfers; i++ {
f.bufferTokens <- nil f.bufferTokens <- nil
} }
} }
@ -1338,7 +1341,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
Overwrite: true, Overwrite: true,
CreatedDate: modTime, CreatedDate: modTime,
ModifiedDate: modTime, ModifiedDate: modTime,
Tool: fs.Config.UserAgent, Tool: o.fs.ci.UserAgent,
} }
if isLargeFile { if isLargeFile {
@ -1348,7 +1351,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
} else { } else {
// otherwise use threaded which is more efficient // otherwise use threaded which is more efficient
req.Method = "threaded" req.Method = "threaded"
req.ThreadCount = &fs.Config.Transfers req.ThreadCount = &o.fs.ci.Transfers
req.Filesize = &size req.Filesize = &size
} }
} }
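Two details in the sharefile hunks are worth noting: `fillBufferTokens` sizes its pool from the captured config, and the upload request takes a pointer straight to the per-context `Transfers` value (`&o.fs.ci.Transfers`) rather than to the old global. A sketch mirroring the token pool:

```go
// Sketch: a channel of reusable buffers sized from the captured
// config's Transfers, capping parallel uploads.
package sketch

import "github.com/rclone/rclone/fs"

type bufFs struct {
	ci           *fs.ConfigInfo
	bufferTokens chan []byte
}

func (f *bufFs) fillBufferTokens() {
	f.bufferTokens = make(chan []byte, f.ci.Transfers)
	for i := 0; i < f.ci.Transfers; i++ {
		f.bufferTokens <- nil // nil means "allocate on first use"
	}
}
```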

View File

@ -58,7 +58,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
return nil, errors.Errorf("can't use method %q with newLargeUpload", info.Method) return nil, errors.Errorf("can't use method %q with newLargeUpload", info.Method)
} }
threads := fs.Config.Transfers threads := f.ci.Transfers
if threads > info.MaxNumberOfThreads { if threads > info.MaxNumberOfThreads {
threads = info.MaxNumberOfThreads threads = info.MaxNumberOfThreads
} }

View File

@ -106,7 +106,7 @@ func init() {
Method: "POST", Method: "POST",
Path: "/app-authorization", Path: "/app-authorization",
} }
srv := rest.NewClient(fshttp.NewClient(fs.Config)).SetRoot(rootURL) // FIXME srv := rest.NewClient(fshttp.NewClient(fs.GetConfig(ctx))).SetRoot(rootURL) // FIXME
// FIXME // FIXME
//err = f.pacer.Call(func() (bool, error) { //err = f.pacer.Call(func() (bool, error) {
@ -403,13 +403,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
} }
root = parsePath(root) root = parsePath(root)
client := fshttp.NewClient(fs.Config) client := fshttp.NewClient(fs.GetConfig(ctx))
f := &Fs{ f := &Fs{
name: name, name: name,
root: root, root: root,
opt: *opt, opt: *opt,
srv: rest.NewClient(client).SetRoot(rootURL), srv: rest.NewClient(client).SetRoot(rootURL),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
m: m, m: m,
authExpiry: parseExpiry(opt.AuthorizationExpiry), authExpiry: parseExpiry(opt.AuthorizationExpiry),
} }

View File

@ -221,6 +221,7 @@ type Fs struct {
root string // the path we are working on if any root string // the path we are working on if any
features *fs.Features // optional features features *fs.Features // optional features
opt Options // options for this backend opt Options // options for this backend
ci *fs.ConfigInfo // global config
c *swift.Connection // the connection to the swift server c *swift.Connection // the connection to the swift server
rootContainer string // container part of root (if any) rootContainer string // container part of root (if any)
rootDirectory string // directory part of root (if any) rootDirectory string // directory part of root (if any)
@ -340,7 +341,8 @@ func (o *Object) split() (container, containerPath string) {
} }
// swiftConnection makes a connection to swift // swiftConnection makes a connection to swift
func swiftConnection(opt *Options, name string) (*swift.Connection, error) { func swiftConnection(ctx context.Context, opt *Options, name string) (*swift.Connection, error) {
ci := fs.GetConfig(ctx)
c := &swift.Connection{ c := &swift.Connection{
// Keep these in the same order as the Config for ease of checking // Keep these in the same order as the Config for ease of checking
UserName: opt.User, UserName: opt.User,
@ -359,9 +361,9 @@ func swiftConnection(opt *Options, name string) (*swift.Connection, error) {
ApplicationCredentialName: opt.ApplicationCredentialName, ApplicationCredentialName: opt.ApplicationCredentialName,
ApplicationCredentialSecret: opt.ApplicationCredentialSecret, ApplicationCredentialSecret: opt.ApplicationCredentialSecret,
EndpointType: swift.EndpointType(opt.EndpointType), EndpointType: swift.EndpointType(opt.EndpointType),
ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport ConnectTimeout: 10 * ci.ConnectTimeout, // Use the timeouts in the transport
Timeout: 10 * fs.Config.Timeout, // Use the timeouts in the transport Timeout: 10 * ci.Timeout, // Use the timeouts in the transport
Transport: fshttp.NewTransport(fs.Config), Transport: fshttp.NewTransport(fs.GetConfig(ctx)),
} }
if opt.EnvAuth { if opt.EnvAuth {
err := c.ApplyEnvironment() err := c.ApplyEnvironment()
@ -433,12 +435,14 @@ func (f *Fs) setRoot(root string) {
// if noCheckContainer is set then the Fs won't check the container // if noCheckContainer is set then the Fs won't check the container
// exists before creating it. // exists before creating it.
func NewFsWithConnection(ctx context.Context, opt *Options, name, root string, c *swift.Connection, noCheckContainer bool) (fs.Fs, error) { func NewFsWithConnection(ctx context.Context, opt *Options, name, root string, c *swift.Connection, noCheckContainer bool) (fs.Fs, error) {
ci := fs.GetConfig(ctx)
f := &Fs{ f := &Fs{
name: name, name: name,
opt: *opt, opt: *opt,
ci: ci,
c: c, c: c,
noCheckContainer: noCheckContainer, noCheckContainer: noCheckContainer,
pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))), pacer: fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep))),
cache: bucket.NewCache(), cache: bucket.NewCache(),
} }
f.setRoot(root) f.setRoot(root)
@ -485,7 +489,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, errors.Wrap(err, "swift: chunk size") return nil, errors.Wrap(err, "swift: chunk size")
} }
c, err := swiftConnection(opt, name) c, err := swiftConnection(ctx, opt, name)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -849,7 +853,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
return fs.ErrorListBucketRequired return fs.ErrorListBucketRequired
} }
// Delete all the files including the directory markers // Delete all the files including the directory markers
toBeDeleted := make(chan fs.Object, fs.Config.Transfers) toBeDeleted := make(chan fs.Object, f.ci.Transfers)
delErr := make(chan error, 1) delErr := make(chan error, 1)
go func() { go func() {
delErr <- operations.DeleteFiles(ctx, toBeDeleted) delErr <- operations.DeleteFiles(ctx, toBeDeleted)
@ -1040,7 +1044,7 @@ func (o *Object) readMetaData() (err error) {
// It attempts to read the object's mtime and if that isn't present the // It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers // LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time { func (o *Object) ModTime(ctx context.Context) time.Time {
if fs.Config.UseServerModTime { if o.fs.ci.UseServerModTime {
return o.lastModified return o.lastModified
} }
err := o.readMetaData() err := o.readMetaData()
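swiftConnection derives the library's own timeouts from the context's config, multiplied by ten so that the transport (built from the same config) stays the authoritative limit, as the inline comments say. A sketch of just that derivation (the struct stands in for swift.Connection):

```go
// Sketch: library timeouts derived from the ctx config; the transport
// enforces the real limits.
package sketch

import (
	"context"
	"time"

	"github.com/rclone/rclone/fs"
)

type swiftish struct {
	ConnectTimeout time.Duration
	Timeout        time.Duration
}

func connSketch(ctx context.Context) swiftish {
	ci := fs.GetConfig(ctx)
	return swiftish{
		ConnectTimeout: 10 * ci.ConnectTimeout, // transport times out first
		Timeout:        10 * ci.Timeout,
	}
}
```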

View File

@ -182,7 +182,7 @@ func (ca *CookieAuth) getSPToken(ctx context.Context) (conf *SharepointSuccessRe
} }
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
client := fshttp.NewClient(fs.Config) client := fshttp.NewClient(fs.GetConfig(ctx))
resp, err := client.Do(req) resp, err := client.Do(req)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "Error while logging in to endpoint") return nil, errors.Wrap(err, "Error while logging in to endpoint")

View File

@ -336,8 +336,8 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
opt: *opt, opt: *opt,
endpoint: u, endpoint: u,
endpointURL: u.String(), endpointURL: u.String(),
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetRoot(u.String()), srv: rest.NewClient(fshttp.NewClient(fs.GetConfig(ctx))).SetRoot(u.String()),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
precision: fs.ModTimeNotSupported, precision: fs.ModTimeNotSupported,
} }
f.features = (&fs.Features{ f.features = (&fs.Features{

View File

@ -88,12 +88,13 @@ type Options struct {
// Fs represents a remote yandex // Fs represents a remote yandex
type Fs struct { type Fs struct {
name string name string
root string // root path root string // root path
opt Options // parsed options opt Options // parsed options
features *fs.Features // optional features ci *fs.ConfigInfo // global config
srv *rest.Client // the connection to the yandex server features *fs.Features // optional features
pacer *fs.Pacer // pacer for API calls srv *rest.Client // the connection to the yandex server
diskRoot string // root path with "disk:/" container name pacer *fs.Pacer // pacer for API calls
diskRoot string // root path with "disk:/" container name
} }
// Object describes a yandex object // Object describes a yandex object
@ -265,11 +266,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
log.Fatalf("Failed to configure Yandex: %v", err) log.Fatalf("Failed to configure Yandex: %v", err)
} }
ci := fs.GetConfig(ctx)
f := &Fs{ f := &Fs{
name: name, name: name,
opt: *opt, opt: *opt,
ci: ci,
srv: rest.NewClient(oAuthClient).SetRoot(rootURL), srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
} }
f.setRoot(root) f.setRoot(root)
f.features = (&fs.Features{ f.features = (&fs.Features{
@ -534,7 +537,7 @@ func (f *Fs) waitForJob(ctx context.Context, location string) (err error) {
RootURL: location, RootURL: location,
Method: "GET", Method: "GET",
} }
deadline := time.Now().Add(fs.Config.Timeout) deadline := time.Now().Add(f.ci.Timeout)
for time.Now().Before(deadline) { for time.Now().Before(deadline) {
var resp *http.Response var resp *http.Response
var body []byte var body []byte
@ -565,7 +568,7 @@ func (f *Fs) waitForJob(ctx context.Context, location string) (err error) {
time.Sleep(1 * time.Second) time.Sleep(1 * time.Second)
} }
return errors.Errorf("async operation didn't complete after %v", fs.Config.Timeout) return errors.Errorf("async operation didn't complete after %v", f.ci.Timeout)
} }
func (f *Fs) delete(ctx context.Context, path string, hardDelete bool) (err error) { func (f *Fs) delete(ctx context.Context, path string, hardDelete bool) (err error) {
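yandex's `waitForJob` turns the config `Timeout` into a polling deadline. A self-contained sketch of that loop, with the status check abstracted into an invented `pollOnce` callback:

```go
// Sketch: poll until the config's Timeout elapses, checking once a
// second, as waitForJob above does.
package sketch

import (
	"context"
	"fmt"
	"time"

	"github.com/rclone/rclone/fs"
)

func waitSketch(ctx context.Context, pollOnce func() (done bool, err error)) error {
	ci := fs.GetConfig(ctx)
	deadline := time.Now().Add(ci.Timeout)
	for time.Now().Before(deadline) {
		done, err := pollOnce()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		time.Sleep(1 * time.Second)
	}
	return fmt.Errorf("async operation didn't complete after %v", ci.Timeout)
}
```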

View File

@ -234,12 +234,13 @@ func ShowStats() bool {
// Run the function with stats and retries if required // Run the function with stats and retries if required
func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) { func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
ci := fs.GetConfig(context.Background())
var cmdErr error var cmdErr error
stopStats := func() {} stopStats := func() {}
if !showStats && ShowStats() { if !showStats && ShowStats() {
showStats = true showStats = true
} }
if fs.Config.Progress { if ci.Progress {
stopStats = startProgress() stopStats = startProgress()
} else if showStats { } else if showStats {
stopStats = StartStats() stopStats = StartStats()
@ -291,13 +292,13 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
} }
fs.Debugf(nil, "%d go routines active\n", runtime.NumGoroutine()) fs.Debugf(nil, "%d go routines active\n", runtime.NumGoroutine())
if fs.Config.Progress && fs.Config.ProgressTerminalTitle { if ci.Progress && ci.ProgressTerminalTitle {
// Clear terminal title // Clear terminal title
terminal.WriteTerminalTitle("") terminal.WriteTerminalTitle("")
} }
// dump all running go-routines // dump all running go-routines
if fs.Config.Dump&fs.DumpGoRoutines != 0 { if ci.Dump&fs.DumpGoRoutines != 0 {
err := pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) err := pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
if err != nil { if err != nil {
fs.Errorf(nil, "Failed to dump goroutines: %v", err) fs.Errorf(nil, "Failed to dump goroutines: %v", err)
@ -305,7 +306,7 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
} }
// dump open files // dump open files
if fs.Config.Dump&fs.DumpOpenFiles != 0 { if ci.Dump&fs.DumpOpenFiles != 0 {
c := exec.Command("lsof", "-p", strconv.Itoa(os.Getpid())) c := exec.Command("lsof", "-p", strconv.Itoa(os.Getpid()))
c.Stdout = os.Stdout c.Stdout = os.Stdout
c.Stderr = os.Stderr c.Stderr = os.Stderr
@ -372,17 +373,18 @@ func StartStats() func() {
// initConfig is run by cobra after initialising the flags // initConfig is run by cobra after initialising the flags
func initConfig() { func initConfig() {
ci := fs.GetConfig(context.Background())
// Activate logger systemd support if systemd invocation ID is detected // Activate logger systemd support if systemd invocation ID is detected
_, sysdLaunch := systemd.GetInvocationID() _, sysdLaunch := systemd.GetInvocationID()
if sysdLaunch { if sysdLaunch {
fs.Config.LogSystemdSupport = true // used during fslog.InitLogging() ci.LogSystemdSupport = true // used during fslog.InitLogging()
} }
// Start the logger // Start the logger
fslog.InitLogging() fslog.InitLogging()
// Finish parsing any command line flags // Finish parsing any command line flags
configflags.SetFlags() configflags.SetFlags(ci)
// Load filters // Load filters
err := filterflags.Reload() err := filterflags.Reload()
@ -396,7 +398,7 @@ func initConfig() {
// Inform user about systemd log support now that we have a logger // Inform user about systemd log support now that we have a logger
if sysdLaunch { if sysdLaunch {
fs.Debugf("rclone", "systemd logging support automatically activated") fs.Debugf("rclone", "systemd logging support automatically activated")
} else if fs.Config.LogSystemdSupport { } else if ci.LogSystemdSupport {
fs.Debugf("rclone", "systemd logging support manually activated") fs.Debugf("rclone", "systemd logging support manually activated")
} }
@ -448,16 +450,17 @@ func initConfig() {
if m, _ := regexp.MatchString("^(bits|bytes)$", *dataRateUnit); m == false { if m, _ := regexp.MatchString("^(bits|bytes)$", *dataRateUnit); m == false {
fs.Errorf(nil, "Invalid unit passed to --stats-unit. Defaulting to bytes.") fs.Errorf(nil, "Invalid unit passed to --stats-unit. Defaulting to bytes.")
fs.Config.DataRateUnit = "bytes" ci.DataRateUnit = "bytes"
} else { } else {
fs.Config.DataRateUnit = *dataRateUnit ci.DataRateUnit = *dataRateUnit
} }
} }
func resolveExitCode(err error) { func resolveExitCode(err error) {
ci := fs.GetConfig(context.Background())
atexit.Run() atexit.Run()
if err == nil { if err == nil {
if fs.Config.ErrorOnNoTransfer { if ci.ErrorOnNoTransfer {
if accounting.GlobalStats().GetTransfers() == 0 { if accounting.GlobalStats().GetTransfers() == 0 {
os.Exit(exitCodeNoFilesTransferred) os.Exit(exitCodeNoFilesTransferred)
} }
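At the command layer there is no caller-supplied context yet, so `Run`, `initConfig` and `resolveExitCode` all fetch the config via `context.Background()`, which yields the process-wide defaults. A sketch of that idiom (the exit-code value is illustrative, not rclone's actual constant):

```go
// Sketch: top-level code reads the default config from the
// background context.
package sketch

import (
	"context"

	"github.com/rclone/rclone/fs"
)

func resolveExitSketch(err error) int {
	ci := fs.GetConfig(context.Background()) // process-wide defaults
	if err == nil && ci.ErrorOnNoTransfer {
		return 9 // hypothetical "no files transferred" exit code
	}
	return 0
}
```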

View File

@ -43,7 +43,7 @@ password to protect your configuration.
`, `,
Run: func(command *cobra.Command, args []string) { Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 0, command, args) cmd.CheckArgs(0, 0, command, args)
config.EditConfig() config.EditConfig(context.Background())
}, },
} }

View File

@ -1,6 +1,7 @@
package cmd package cmd
import ( import (
"context"
"fmt" "fmt"
"log" "log"
"os" "os"
@ -166,8 +167,9 @@ func runRoot(cmd *cobra.Command, args []string) {
// //
// Helpful example: http://rtfcode.com/xref/moby-17.03.2-ce/cli/cobra.go // Helpful example: http://rtfcode.com/xref/moby-17.03.2-ce/cli/cobra.go
func setupRootCommand(rootCmd *cobra.Command) { func setupRootCommand(rootCmd *cobra.Command) {
ci := fs.GetConfig(context.Background())
// Add global flags // Add global flags
configflags.AddFlags(pflag.CommandLine) configflags.AddFlags(ci, pflag.CommandLine)
filterflags.AddFlags(pflag.CommandLine) filterflags.AddFlags(pflag.CommandLine)
rcflags.AddFlags(pflag.CommandLine) rcflags.AddFlags(pflag.CommandLine)
logflags.AddFlags(pflag.CommandLine) logflags.AddFlags(pflag.CommandLine)

View File

@ -249,9 +249,11 @@ func (r *results) checkStringPositions(k, s string) {
// check we can write a file with the control chars // check we can write a file with the control chars
func (r *results) checkControls() { func (r *results) checkControls() {
fs.Infof(r.f, "Trying to create control character file names") fs.Infof(r.f, "Trying to create control character file names")
ci := fs.GetConfig(context.Background())
// Concurrency control // Concurrency control
tokens := make(chan struct{}, fs.Config.Checkers) tokens := make(chan struct{}, ci.Checkers)
for i := 0; i < fs.Config.Checkers; i++ { for i := 0; i < ci.Checkers; i++ {
tokens <- struct{}{} tokens <- struct{}{}
} }
var wg sync.WaitGroup var wg sync.WaitGroup

View File

@ -49,9 +49,10 @@ If you just want the directory names use "rclone lsf --dirs-only".
` + lshelp.Help, ` + lshelp.Help,
Run: func(command *cobra.Command, args []string) { Run: func(command *cobra.Command, args []string) {
ci := fs.GetConfig(context.Background())
cmd.CheckArgs(1, 1, command, args) cmd.CheckArgs(1, 1, command, args)
if recurse { if recurse {
fs.Config.MaxDepth = 0 ci.MaxDepth = 0
} }
fsrc := cmd.NewFsSrc(args) fsrc := cmd.NewFsSrc(args)
cmd.Run(false, false, command, func() error { cmd.Run(false, false, command, func() error {

View File

@ -162,12 +162,13 @@ func (d *Dir) AttrI(i int) (size int64, count int64, isDir bool, readable bool)
// Scan the Fs passed in, returning a root directory channel and an // Scan the Fs passed in, returning a root directory channel and an
// error channel // error channel
func Scan(ctx context.Context, f fs.Fs) (chan *Dir, chan error, chan struct{}) { func Scan(ctx context.Context, f fs.Fs) (chan *Dir, chan error, chan struct{}) {
ci := fs.GetConfig(ctx)
root := make(chan *Dir, 1) root := make(chan *Dir, 1)
errChan := make(chan error, 1) errChan := make(chan error, 1)
updated := make(chan struct{}, 1) updated := make(chan struct{}, 1)
go func() { go func() {
parents := map[string]*Dir{} parents := map[string]*Dir{}
err := walk.Walk(ctx, f, "", false, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error { err := walk.Walk(ctx, f, "", false, ci.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
if err != nil { if err != nil {
return err // FIXME mark directory as errored instead of aborting return err // FIXME mark directory as errored instead of aborting
} }

View File

@ -177,7 +177,7 @@ func doCall(ctx context.Context, path string, in rc.Params) (out rc.Params, err
} }
// Do HTTP request // Do HTTP request
client := fshttp.NewClient(fs.Config) client := fshttp.NewClient(fs.GetConfig(ctx))
url += path url += path
data, err := json.Marshal(in) data, err := json.Marshal(in)
if err != nil { if err != nil {

View File

@ -145,7 +145,7 @@ func (s *server) serve() (err error) {
// An SSH server is represented by a ServerConfig, which holds // An SSH server is represented by a ServerConfig, which holds
// certificate details and handles authentication of ServerConns. // certificate details and handles authentication of ServerConns.
s.config = &ssh.ServerConfig{ s.config = &ssh.ServerConfig{
ServerVersion: "SSH-2.0-" + fs.Config.UserAgent, ServerVersion: "SSH-2.0-" + fs.GetConfig(s.ctx).UserAgent,
PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) { PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) {
fs.Debugf(describeConn(c), "Password login attempt for %s", c.User()) fs.Debugf(describeConn(c), "Password login attempt for %s", c.User())
if s.proxy != nil { if s.proxy != nil {

View File

@ -108,8 +108,9 @@ short options as they conflict with rclone's short options.
opts.CTimeSort = opts.CTimeSort || sort == "ctime" opts.CTimeSort = opts.CTimeSort || sort == "ctime"
opts.NameSort = sort == "name" opts.NameSort = sort == "name"
opts.SizeSort = sort == "size" opts.SizeSort = sort == "size"
ci := fs.GetConfig(context.Background())
if opts.DeepLevel == 0 { if opts.DeepLevel == 0 {
opts.DeepLevel = fs.Config.MaxDepth opts.DeepLevel = ci.MaxDepth
} }
cmd.Run(false, false, command, func() error { cmd.Run(false, false, command, func() error {
return Tree(fsrc, outFile, &opts) return Tree(fsrc, outFile, &opts)

View File

@ -41,6 +41,7 @@ type Account struct {
mu sync.Mutex // mutex protects these values mu sync.Mutex // mutex protects these values
in io.Reader in io.Reader
ctx context.Context // current context for transfer - may change ctx context.Context // current context for transfer - may change
ci *fs.ConfigInfo
origIn io.ReadCloser origIn io.ReadCloser
close io.Closer close io.Closer
size int64 size int64
@ -74,6 +75,7 @@ func newAccountSizeName(ctx context.Context, stats *StatsInfo, in io.ReadCloser,
stats: stats, stats: stats,
in: in, in: in,
ctx: ctx, ctx: ctx,
ci: fs.GetConfig(ctx),
close: in, close: in,
origIn: in, origIn: in,
size: size, size: size,
@ -85,10 +87,10 @@ func newAccountSizeName(ctx context.Context, stats *StatsInfo, in io.ReadCloser,
max: -1, max: -1,
}, },
} }
if fs.Config.CutoffMode == fs.CutoffModeHard { if acc.ci.CutoffMode == fs.CutoffModeHard {
acc.values.max = int64((fs.Config.MaxTransfer)) acc.values.max = int64((acc.ci.MaxTransfer))
} }
currLimit := fs.Config.BwLimitFile.LimitAt(time.Now()) currLimit := acc.ci.BwLimitFile.LimitAt(time.Now())
if currLimit.Bandwidth > 0 { if currLimit.Bandwidth > 0 {
fs.Debugf(acc.name, "Limiting file transfer to %v", currLimit.Bandwidth) fs.Debugf(acc.name, "Limiting file transfer to %v", currLimit.Bandwidth)
acc.tokenBucket = newTokenBucket(currLimit.Bandwidth) acc.tokenBucket = newTokenBucket(currLimit.Bandwidth)
@ -107,14 +109,14 @@ func (acc *Account) WithBuffer() *Account {
} }
acc.withBuf = true acc.withBuf = true
var buffers int var buffers int
if acc.size >= int64(fs.Config.BufferSize) || acc.size == -1 { if acc.size >= int64(acc.ci.BufferSize) || acc.size == -1 {
buffers = int(int64(fs.Config.BufferSize) / asyncreader.BufferSize) buffers = int(int64(acc.ci.BufferSize) / asyncreader.BufferSize)
} else { } else {
buffers = int(acc.size / asyncreader.BufferSize) buffers = int(acc.size / asyncreader.BufferSize)
} }
// On big files add a buffer // On big files add a buffer
if buffers > 0 { if buffers > 0 {
rc, err := asyncreader.New(acc.origIn, buffers) rc, err := asyncreader.New(acc.ctx, acc.origIn, buffers)
if err != nil { if err != nil {
fs.Errorf(acc.name, "Failed to make buffer: %v", err) fs.Errorf(acc.name, "Failed to make buffer: %v", err)
} else { } else {
@ -472,7 +474,7 @@ func (acc *Account) String() string {
} }
} }
if fs.Config.DataRateUnit == "bits" { if acc.ci.DataRateUnit == "bits" {
cur = cur * 8 cur = cur * 8
} }
@ -482,8 +484,8 @@ func (acc *Account) String() string {
} }
return fmt.Sprintf("%*s:%3d%% /%s, %s/s, %s", return fmt.Sprintf("%*s:%3d%% /%s, %s/s, %s",
fs.Config.StatsFileNameLength, acc.ci.StatsFileNameLength,
shortenName(acc.name, fs.Config.StatsFileNameLength), shortenName(acc.name, acc.ci.StatsFileNameLength),
percentageDone, percentageDone,
fs.SizeSuffix(b), fs.SizeSuffix(b),
fs.SizeSuffix(cur), fs.SizeSuffix(cur),
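The accounting changes capture `ci` once per Account at creation, then use it for cutoff, bandwidth and display decisions. The buffer sizing in `WithBuffer` is plain arithmetic worth spelling out; a sketch where `chunk` stands in for `asyncreader.BufferSize`:

```go
// Sketch of the WithBuffer sizing above: cap at BufferSize for unknown
// or large files, otherwise scale with the file size.
package sketch

import "github.com/rclone/rclone/fs"

func bufferCount(ci *fs.ConfigInfo, size int64, chunk int64) int {
	if size >= int64(ci.BufferSize) || size == -1 {
		return int(int64(ci.BufferSize) / chunk)
	}
	return int(size / chunk)
}
```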

View File

@ -258,13 +258,14 @@ func TestAccountAccounter(t *testing.T) {
func TestAccountMaxTransfer(t *testing.T) { func TestAccountMaxTransfer(t *testing.T) {
ctx := context.Background() ctx := context.Background()
old := fs.Config.MaxTransfer ci := fs.GetConfig(ctx)
oldMode := fs.Config.CutoffMode old := ci.MaxTransfer
oldMode := ci.CutoffMode
fs.Config.MaxTransfer = 15 ci.MaxTransfer = 15
defer func() { defer func() {
fs.Config.MaxTransfer = old ci.MaxTransfer = old
fs.Config.CutoffMode = oldMode ci.CutoffMode = oldMode
}() }()
in := ioutil.NopCloser(bytes.NewBuffer(make([]byte, 100))) in := ioutil.NopCloser(bytes.NewBuffer(make([]byte, 100)))
@ -284,7 +285,7 @@ func TestAccountMaxTransfer(t *testing.T) {
assert.Equal(t, ErrorMaxTransferLimitReachedFatal, err) assert.Equal(t, ErrorMaxTransferLimitReachedFatal, err)
assert.True(t, fserrors.IsFatalError(err)) assert.True(t, fserrors.IsFatalError(err))
fs.Config.CutoffMode = fs.CutoffModeSoft ci.CutoffMode = fs.CutoffModeSoft
stats = NewStats(ctx) stats = NewStats(ctx)
acc = newAccountSizeName(ctx, stats, in, 1, "test") acc = newAccountSizeName(ctx, stats, in, 1, "test")
@ -301,13 +302,14 @@ func TestAccountMaxTransfer(t *testing.T) {
func TestAccountMaxTransferWriteTo(t *testing.T) { func TestAccountMaxTransferWriteTo(t *testing.T) {
ctx := context.Background() ctx := context.Background()
old := fs.Config.MaxTransfer ci := fs.GetConfig(ctx)
oldMode := fs.Config.CutoffMode old := ci.MaxTransfer
oldMode := ci.CutoffMode
fs.Config.MaxTransfer = 15 ci.MaxTransfer = 15
defer func() { defer func() {
fs.Config.MaxTransfer = old ci.MaxTransfer = old
fs.Config.CutoffMode = oldMode ci.CutoffMode = oldMode
}() }()
in := ioutil.NopCloser(readers.NewPatternReader(1024)) in := ioutil.NopCloser(readers.NewPatternReader(1024))
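The test updates establish the post-deglobalisation pattern for tests: fetch the config from the context, mutate it, and restore it in a deferred function so later tests see the original values. A minimal sketch:

```go
// Sketch: mutate the context's config in a test and restore it after.
package sketch

import (
	"context"
	"testing"

	"github.com/rclone/rclone/fs"
)

func TestWithMaxTransfer(t *testing.T) {
	ci := fs.GetConfig(context.Background())
	old := ci.MaxTransfer
	ci.MaxTransfer = 15
	defer func() { ci.MaxTransfer = old }() // restore for later tests
	// ... exercise code that reads ci.MaxTransfer
}
```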

View File

@ -15,8 +15,9 @@ type inProgress struct {
// newInProgress makes a new inProgress object // newInProgress makes a new inProgress object
func newInProgress(ctx context.Context) *inProgress { func newInProgress(ctx context.Context) *inProgress {
ci := fs.GetConfig(ctx)
return &inProgress{ return &inProgress{
m: make(map[string]*Account, fs.Config.Transfers), m: make(map[string]*Account, ci.Transfers),
} }
} }

View File

@ -24,6 +24,7 @@ var startTime = time.Now()
type StatsInfo struct { type StatsInfo struct {
mu sync.RWMutex mu sync.RWMutex
ctx context.Context ctx context.Context
ci *fs.ConfigInfo
bytes int64 bytes int64
errors int64 errors int64
lastError error lastError error
@ -52,10 +53,12 @@ type StatsInfo struct {
// NewStats creates an initialised StatsInfo // NewStats creates an initialised StatsInfo
func NewStats(ctx context.Context) *StatsInfo { func NewStats(ctx context.Context) *StatsInfo {
ci := fs.GetConfig(ctx)
return &StatsInfo{ return &StatsInfo{
ctx: ctx, ctx: ctx,
checking: newTransferMap(fs.Config.Checkers, "checking"), ci: ci,
transferring: newTransferMap(fs.Config.Transfers, "transferring"), checking: newTransferMap(ci.Checkers, "checking"),
transferring: newTransferMap(ci.Transfers, "transferring"),
inProgress: newInProgress(ctx), inProgress: newInProgress(ctx),
} }
} }
@ -243,7 +246,7 @@ func (s *StatsInfo) String() string {
} }
displaySpeed := speed displaySpeed := speed
if fs.Config.DataRateUnit == "bits" { if s.ci.DataRateUnit == "bits" {
displaySpeed *= 8 displaySpeed *= 8
} }
@ -259,7 +262,7 @@ func (s *StatsInfo) String() string {
dateString = "" dateString = ""
) )
if !fs.Config.StatsOneLine { if !s.ci.StatsOneLine {
_, _ = fmt.Fprintf(buf, "\nTransferred: ") _, _ = fmt.Fprintf(buf, "\nTransferred: ")
} else { } else {
xfrchk := []string{} xfrchk := []string{}
@ -272,9 +275,9 @@ func (s *StatsInfo) String() string {
if len(xfrchk) > 0 { if len(xfrchk) > 0 {
xfrchkString = fmt.Sprintf(" (%s)", strings.Join(xfrchk, ", ")) xfrchkString = fmt.Sprintf(" (%s)", strings.Join(xfrchk, ", "))
} }
if fs.Config.StatsOneLineDate { if s.ci.StatsOneLineDate {
t := time.Now() t := time.Now()
dateString = t.Format(fs.Config.StatsOneLineDateFormat) // Including the separator so people can customize it dateString = t.Format(s.ci.StatsOneLineDateFormat) // Including the separator so people can customize it
} }
} }
@ -283,17 +286,17 @@ func (s *StatsInfo) String() string {
fs.SizeSuffix(s.bytes), fs.SizeSuffix(s.bytes),
fs.SizeSuffix(totalSize).Unit("Bytes"), fs.SizeSuffix(totalSize).Unit("Bytes"),
percent(s.bytes, totalSize), percent(s.bytes, totalSize),
fs.SizeSuffix(displaySpeed).Unit(strings.Title(fs.Config.DataRateUnit)+"/s"), fs.SizeSuffix(displaySpeed).Unit(strings.Title(s.ci.DataRateUnit)+"/s"),
etaString(currentSize, totalSize, speed), etaString(currentSize, totalSize, speed),
xfrchkString, xfrchkString,
) )
if fs.Config.ProgressTerminalTitle { if s.ci.ProgressTerminalTitle {
// Writes ETA to the terminal title // Writes ETA to the terminal title
terminal.WriteTerminalTitle("ETA: " + etaString(currentSize, totalSize, speed)) terminal.WriteTerminalTitle("ETA: " + etaString(currentSize, totalSize, speed))
} }
if !fs.Config.StatsOneLine { if !s.ci.StatsOneLine {
_, _ = buf.WriteRune('\n') _, _ = buf.WriteRune('\n')
errorDetails := "" errorDetails := ""
switch { switch {
@ -333,7 +336,7 @@ func (s *StatsInfo) String() string {
s.mu.RUnlock() s.mu.RUnlock()
// Add per transfer stats if required // Add per transfer stats if required
if !fs.Config.StatsOneLine { if !s.ci.StatsOneLine {
if !s.checking.empty() { if !s.checking.empty() {
_, _ = fmt.Fprintf(buf, "Checking:\n%s\n", s.checking.String(s.ctx, s.inProgress, s.transferring)) _, _ = fmt.Fprintf(buf, "Checking:\n%s\n", s.checking.String(s.ctx, s.inProgress, s.transferring))
} }
@ -361,11 +364,11 @@ func (s *StatsInfo) Transferred() []TransferSnapshot {
// Log outputs the StatsInfo to the log // Log outputs the StatsInfo to the log
func (s *StatsInfo) Log() { func (s *StatsInfo) Log() {
if fs.Config.UseJSONLog { if s.ci.UseJSONLog {
out, _ := s.RemoteStats() out, _ := s.RemoteStats()
fs.LogLevelPrintf(fs.Config.StatsLogLevel, nil, "%v%v\n", s, fs.LogValue("stats", out)) fs.LogLevelPrintf(s.ci.StatsLogLevel, nil, "%v%v\n", s, fs.LogValue("stats", out))
} else { } else {
fs.LogLevelPrintf(fs.Config.StatsLogLevel, nil, "%v\n", s) fs.LogLevelPrintf(s.ci.StatsLogLevel, nil, "%v\n", s)
} }
} }
@ -681,7 +684,7 @@ func (s *StatsInfo) PruneTransfers() {
} }
s.mu.Lock() s.mu.Lock()
// remove a transfer from the start if we are over quota // remove a transfer from the start if we are over quota
if len(s.startedTransfers) > MaxCompletedTransfers+fs.Config.Transfers { if len(s.startedTransfers) > MaxCompletedTransfers+s.ci.Transfers {
for i, tr := range s.startedTransfers { for i, tr := range s.startedTransfers {
if tr.IsDone() { if tr.IsDone() {
s.removeTransfer(tr, i) s.removeTransfer(tr, i)

View File

@ -308,13 +308,14 @@ func newStatsGroups() *statsGroups {
func (sg *statsGroups) set(ctx context.Context, group string, stats *StatsInfo) { func (sg *statsGroups) set(ctx context.Context, group string, stats *StatsInfo) {
sg.mu.Lock() sg.mu.Lock()
defer sg.mu.Unlock() defer sg.mu.Unlock()
ci := fs.GetConfig(ctx)
// Limit number of groups kept in memory. // Limit number of groups kept in memory.
if len(sg.order) >= fs.Config.MaxStatsGroups { if len(sg.order) >= ci.MaxStatsGroups {
group := sg.order[0] group := sg.order[0]
fs.LogPrintf(fs.LogLevelDebug, nil, "Max number of stats groups reached removing %s", group) fs.LogPrintf(fs.LogLevelDebug, nil, "Max number of stats groups reached removing %s", group)
delete(sg.m, group) delete(sg.m, group)
r := (len(sg.order) - fs.Config.MaxStatsGroups) + 1 r := (len(sg.order) - ci.MaxStatsGroups) + 1
sg.order = sg.order[r:] sg.order = sg.order[r:]
} }
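Worked example of the trim arithmetic: with ci.MaxStatsGroups = 100 and len(sg.order) = 100, the guard fires and r = (100 - 100) + 1 = 1, so exactly one group, the oldest, is dropped before the new one is stored.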

View File

@ -386,6 +386,7 @@ func TestTimeRangeDuration(t *testing.T) {
func TestPruneTransfers(t *testing.T) { func TestPruneTransfers(t *testing.T) {
ctx := context.Background() ctx := context.Background()
ci := fs.GetConfig(ctx)
for _, test := range []struct { for _, test := range []struct {
Name string Name string
Transfers int Transfers int
@ -396,7 +397,7 @@ func TestPruneTransfers(t *testing.T) {
Name: "Limited number of StartedTransfers", Name: "Limited number of StartedTransfers",
Limit: 100, Limit: 100,
Transfers: 200, Transfers: 200,
ExpectedStartedTransfers: 100 + fs.Config.Transfers, ExpectedStartedTransfers: 100 + ci.Transfers,
}, },
{ {
Name: "Unlimited number of StartedTransfers", Name: "Unlimited number of StartedTransfers",

View File

@ -36,8 +36,9 @@ func newTokenBucket(bandwidth fs.SizeSuffix) *rate.Limiter {
// StartTokenBucket starts the token bucket if necessary // StartTokenBucket starts the token bucket if necessary
func StartTokenBucket(ctx context.Context) { func StartTokenBucket(ctx context.Context) {
ci := fs.GetConfig(ctx)
currLimitMu.Lock() currLimitMu.Lock()
currLimit := fs.Config.BwLimit.LimitAt(time.Now()) currLimit := ci.BwLimit.LimitAt(time.Now())
currLimitMu.Unlock() currLimitMu.Unlock()
if currLimit.Bandwidth > 0 { if currLimit.Bandwidth > 0 {
@ -52,16 +53,17 @@ func StartTokenBucket(ctx context.Context) {
// StartTokenTicker creates a ticker to update the bandwidth limiter every minute. // StartTokenTicker creates a ticker to update the bandwidth limiter every minute.
func StartTokenTicker(ctx context.Context) { func StartTokenTicker(ctx context.Context) {
ci := fs.GetConfig(ctx)
// If the timetable has a single entry or was not specified, we don't need // If the timetable has a single entry or was not specified, we don't need
// a ticker to update the bandwidth. // a ticker to update the bandwidth.
if len(fs.Config.BwLimit) <= 1 { if len(ci.BwLimit) <= 1 {
return return
} }
ticker := time.NewTicker(time.Minute) ticker := time.NewTicker(time.Minute)
go func() { go func() {
for range ticker.C { for range ticker.C {
limitNow := fs.Config.BwLimit.LimitAt(time.Now()) limitNow := ci.BwLimit.LimitAt(time.Now())
currLimitMu.Lock() currLimitMu.Lock()
if currLimit.Bandwidth != limitNow.Bandwidth { if currLimit.Bandwidth != limitNow.Bandwidth {
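StartTokenTicker reads the BwLimit timetable from the config carried by ctx, so any override must be in place before the call. A hedged sketch (the timetable string follows rclone's --bwlimit syntax; Set is the pflag-style setter BwTimetable implements):

    ctx, ci := fs.AddConfig(context.Background())
    _ = ci.BwLimit.Set("08:00,512 18:00,10M") // off-peak and peak limits
    accounting.StartTokenTicker(ctx)          // the ticker follows this timetable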

View File

@ -99,10 +99,11 @@ func (tr *Transfer) Done(ctx context.Context, err error) {
acc := tr.acc acc := tr.acc
tr.mu.RUnlock() tr.mu.RUnlock()
ci := fs.GetConfig(ctx)
if acc != nil { if acc != nil {
// Close the file if it is still open // Close the file if it is still open
if err := acc.Close(); err != nil { if err := acc.Close(); err != nil {
fs.LogLevelPrintf(fs.Config.StatsLogLevel, nil, "can't close account: %+v\n", err) fs.LogLevelPrintf(ci.StatsLogLevel, nil, "can't close account: %+v\n", err)
} }
// Signal done with accounting // Signal done with accounting
acc.Done() acc.Done()
@ -128,10 +129,11 @@ func (tr *Transfer) Reset(ctx context.Context) {
acc := tr.acc acc := tr.acc
tr.acc = nil tr.acc = nil
tr.mu.RUnlock() tr.mu.RUnlock()
ci := fs.GetConfig(ctx)
if acc != nil { if acc != nil {
if err := acc.Close(); err != nil { if err := acc.Close(); err != nil {
fs.LogLevelPrintf(fs.Config.StatsLogLevel, nil, "can't close account: %+v\n", err) fs.LogLevelPrintf(ci.StatsLogLevel, nil, "can't close account: %+v\n", err)
} }
} }
} }

View File

@ -92,6 +92,7 @@ func (tm *transferMap) _sortedSlice() []*Transfer {
func (tm *transferMap) String(ctx context.Context, progress *inProgress, exclude *transferMap) string { func (tm *transferMap) String(ctx context.Context, progress *inProgress, exclude *transferMap) string {
tm.mu.RLock() tm.mu.RLock()
defer tm.mu.RUnlock() defer tm.mu.RUnlock()
ci := fs.GetConfig(ctx)
stringList := make([]string, 0, len(tm.items)) stringList := make([]string, 0, len(tm.items))
for _, tr := range tm._sortedSlice() { for _, tr := range tm._sortedSlice() {
if exclude != nil { if exclude != nil {
@ -107,8 +108,8 @@ func (tm *transferMap) String(ctx context.Context, progress *inProgress, exclude
out = acc.String() out = acc.String()
} else { } else {
out = fmt.Sprintf("%*s: %s", out = fmt.Sprintf("%*s: %s",
fs.Config.StatsFileNameLength, ci.StatsFileNameLength,
shortenName(tr.remote, fs.Config.StatsFileNameLength), shortenName(tr.remote, ci.StatsFileNameLength),
tm.name, tm.name,
) )
} }

View File

@ -3,6 +3,7 @@
package asyncreader package asyncreader
import ( import (
"context"
"io" "io"
"sync" "sync"
"time" "time"
@ -29,17 +30,18 @@ var ErrorStreamAbandoned = errors.New("stream abandoned")
// This should be fully transparent, except that once an error // This should be fully transparent, except that once an error
// has been returned from the Reader, it will not recover. // has been returned from the Reader, it will not recover.
type AsyncReader struct { type AsyncReader struct {
in io.ReadCloser // Input reader in io.ReadCloser // Input reader
ready chan *buffer // Buffers ready to be handed to the reader ready chan *buffer // Buffers ready to be handed to the reader
token chan struct{} // Tokens which allow a buffer to be taken token chan struct{} // Tokens which allow a buffer to be taken
exit chan struct{} // Closes when finished exit chan struct{} // Closes when finished
buffers int // Number of buffers buffers int // Number of buffers
err error // If an error has occurred it is here err error // If an error has occurred it is here
cur *buffer // Current buffer being served cur *buffer // Current buffer being served
exited chan struct{} // Channel is closed when the async reader shuts down exited chan struct{} // Channel is closed when the async reader shuts down
size int // size of buffer to use size int // size of buffer to use
closed bool // whether we have closed the underlying stream closed bool // whether we have closed the underlying stream
mu sync.Mutex // lock for Read/WriteTo/Abandon/Close mu sync.Mutex // lock for Read/WriteTo/Abandon/Close
ci *fs.ConfigInfo // for reading config
} }
// New returns a reader that will asynchronously read from // New returns a reader that will asynchronously read from
@ -48,14 +50,16 @@ type AsyncReader struct {
// function has returned. // function has returned.
// The input can be read from the returned reader. // The input can be read from the returned reader.
// When done use Close to release the buffers and close the supplied input. // When done use Close to release the buffers and close the supplied input.
func New(rd io.ReadCloser, buffers int) (*AsyncReader, error) { func New(ctx context.Context, rd io.ReadCloser, buffers int) (*AsyncReader, error) {
if buffers <= 0 { if buffers <= 0 {
return nil, errors.New("number of buffers too small") return nil, errors.New("number of buffers too small")
} }
if rd == nil { if rd == nil {
return nil, errors.New("nil reader supplied") return nil, errors.New("nil reader supplied")
} }
a := &AsyncReader{} a := &AsyncReader{
ci: fs.GetConfig(ctx),
}
a.init(rd, buffers) a.init(rd, buffers)
return a, nil return a, nil
} }
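New now takes a context so the reader can pick up settings such as UseMmap from the config attached to it. A minimal usage sketch:

    ctx := context.Background()
    ar, err := New(ctx, ioutil.NopCloser(strings.NewReader("data")), 4)
    if err != nil {
        // handle error
    }
    defer func() { _ = ar.Close() }()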
@ -114,7 +118,7 @@ func (a *AsyncReader) putBuffer(b *buffer) {
func (a *AsyncReader) getBuffer() *buffer { func (a *AsyncReader) getBuffer() *buffer {
bufferPoolOnce.Do(func() { bufferPoolOnce.Do(func() {
// Initialise the buffer pool when used // Initialise the buffer pool when used
bufferPool = pool.New(bufferCacheFlushTime, BufferSize, bufferCacheSize, fs.Config.UseMmap) bufferPool = pool.New(bufferCacheFlushTime, BufferSize, bufferCacheSize, a.ci.UseMmap)
}) })
return &buffer{ return &buffer{
buf: bufferPool.Get(), buf: bufferPool.Get(),

View File

@ -3,6 +3,7 @@ package asyncreader
import ( import (
"bufio" "bufio"
"bytes" "bytes"
"context"
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
@ -20,8 +21,10 @@ import (
) )
func TestAsyncReader(t *testing.T) { func TestAsyncReader(t *testing.T) {
ctx := context.Background()
buf := ioutil.NopCloser(bytes.NewBufferString("Testbuffer")) buf := ioutil.NopCloser(bytes.NewBufferString("Testbuffer"))
ar, err := New(buf, 4) ar, err := New(ctx, buf, 4)
require.NoError(t, err) require.NoError(t, err)
var dst = make([]byte, 100) var dst = make([]byte, 100)
@ -46,7 +49,7 @@ func TestAsyncReader(t *testing.T) {
// Test Close without reading everything // Test Close without reading everything
buf = ioutil.NopCloser(bytes.NewBuffer(make([]byte, 50000))) buf = ioutil.NopCloser(bytes.NewBuffer(make([]byte, 50000)))
ar, err = New(buf, 4) ar, err = New(ctx, buf, 4)
require.NoError(t, err) require.NoError(t, err)
err = ar.Close() err = ar.Close()
require.NoError(t, err) require.NoError(t, err)
@ -54,8 +57,10 @@ func TestAsyncReader(t *testing.T) {
} }
func TestAsyncWriteTo(t *testing.T) { func TestAsyncWriteTo(t *testing.T) {
ctx := context.Background()
buf := ioutil.NopCloser(bytes.NewBufferString("Testbuffer")) buf := ioutil.NopCloser(bytes.NewBufferString("Testbuffer"))
ar, err := New(buf, 4) ar, err := New(ctx, buf, 4)
require.NoError(t, err) require.NoError(t, err)
var dst = &bytes.Buffer{} var dst = &bytes.Buffer{}
@ -73,15 +78,17 @@ func TestAsyncWriteTo(t *testing.T) {
} }
func TestAsyncReaderErrors(t *testing.T) { func TestAsyncReaderErrors(t *testing.T) {
ctx := context.Background()
// test nil reader // test nil reader
_, err := New(nil, 4) _, err := New(ctx, nil, 4)
require.Error(t, err) require.Error(t, err)
// invalid buffer number // invalid buffer number
buf := ioutil.NopCloser(bytes.NewBufferString("Testbuffer")) buf := ioutil.NopCloser(bytes.NewBufferString("Testbuffer"))
_, err = New(buf, 0) _, err = New(ctx, buf, 0)
require.Error(t, err) require.Error(t, err)
_, err = New(buf, -1) _, err = New(ctx, buf, -1)
require.Error(t, err) require.Error(t, err)
} }
@ -140,6 +147,8 @@ var bufsizes = []int{
// Test various input buffer sizes, number of buffers and read sizes. // Test various input buffer sizes, number of buffers and read sizes.
func TestAsyncReaderSizes(t *testing.T) { func TestAsyncReaderSizes(t *testing.T) {
ctx := context.Background()
var texts [31]string var texts [31]string
str := "" str := ""
all := "" all := ""
@ -161,7 +170,7 @@ func TestAsyncReaderSizes(t *testing.T) {
bufsize := bufsizes[k] bufsize := bufsizes[k]
read := readmaker.fn(strings.NewReader(text)) read := readmaker.fn(strings.NewReader(text))
buf := bufio.NewReaderSize(read, bufsize) buf := bufio.NewReaderSize(read, bufsize)
ar, _ := New(ioutil.NopCloser(buf), l) ar, _ := New(ctx, ioutil.NopCloser(buf), l)
s := bufreader.fn(ar) s := bufreader.fn(ar)
// "timeout" expects the Reader to recover, AsyncReader does not. // "timeout" expects the Reader to recover, AsyncReader does not.
if s != text && readmaker.name != "timeout" { if s != text && readmaker.name != "timeout" {
@ -179,6 +188,8 @@ func TestAsyncReaderSizes(t *testing.T) {
// Test various input buffer sizes, number of buffers and read sizes. // Test various input buffer sizes, number of buffers and read sizes.
func TestAsyncReaderWriteTo(t *testing.T) { func TestAsyncReaderWriteTo(t *testing.T) {
ctx := context.Background()
var texts [31]string var texts [31]string
str := "" str := ""
all := "" all := ""
@ -200,7 +211,7 @@ func TestAsyncReaderWriteTo(t *testing.T) {
bufsize := bufsizes[k] bufsize := bufsizes[k]
read := readmaker.fn(strings.NewReader(text)) read := readmaker.fn(strings.NewReader(text))
buf := bufio.NewReaderSize(read, bufsize) buf := bufio.NewReaderSize(read, bufsize)
ar, _ := New(ioutil.NopCloser(buf), l) ar, _ := New(ctx, ioutil.NopCloser(buf), l)
dst := &bytes.Buffer{} dst := &bytes.Buffer{}
_, err := ar.WriteTo(dst) _, err := ar.WriteTo(dst)
if err != nil && err != io.EOF && err != iotest.ErrTimeout { if err != nil && err != io.EOF && err != iotest.ErrTimeout {
@ -246,8 +257,10 @@ func (z *zeroReader) Close() error {
// Test closing and abandoning // Test closing and abandoning
func testAsyncReaderClose(t *testing.T, writeto bool) { func testAsyncReaderClose(t *testing.T, writeto bool) {
ctx := context.Background()
zr := &zeroReader{} zr := &zeroReader{}
a, err := New(zr, 16) a, err := New(ctx, zr, 16)
require.NoError(t, err) require.NoError(t, err)
var copyN int64 var copyN int64
var copyErr error var copyErr error
@ -287,6 +300,8 @@ func TestAsyncReaderCloseRead(t *testing.T) { testAsyncReaderClose(t, false)
func TestAsyncReaderCloseWriteTo(t *testing.T) { testAsyncReaderClose(t, true) } func TestAsyncReaderCloseWriteTo(t *testing.T) { testAsyncReaderClose(t, true) }
func TestAsyncReaderSkipBytes(t *testing.T) { func TestAsyncReaderSkipBytes(t *testing.T) {
ctx := context.Background()
t.Parallel() t.Parallel()
data := make([]byte, 15000) data := make([]byte, 15000)
buf := make([]byte, len(data)) buf := make([]byte, len(data))
@ -312,7 +327,7 @@ func TestAsyncReaderSkipBytes(t *testing.T) {
t.Run(fmt.Sprintf("%d", initialRead), func(t *testing.T) { t.Run(fmt.Sprintf("%d", initialRead), func(t *testing.T) {
for _, skip := range skips { for _, skip := range skips {
t.Run(fmt.Sprintf("%d", skip), func(t *testing.T) { t.Run(fmt.Sprintf("%d", skip), func(t *testing.T) {
ar, err := New(ioutil.NopCloser(bytes.NewReader(data)), buffers) ar, err := New(ctx, ioutil.NopCloser(bytes.NewReader(data)), buffers)
require.NoError(t, err) require.NoError(t, err)
wantSkipFalse := false wantSkipFalse := false

View File

@ -1,6 +1,7 @@
package fs package fs
import ( import (
"context"
"net" "net"
"strings" "strings"
"time" "time"
@ -10,8 +11,8 @@ import (
// Global // Global
var ( var (
// Config is the global config // globalConfig for rclone
Config = NewConfig() globalConfig = NewConfig()
// Read a value from the config file // Read a value from the config file
// //
@ -162,6 +163,34 @@ func NewConfig() *ConfigInfo {
return c return c
} }
type configContextKeyType struct{}
// Context key for config
var configContextKey = configContextKeyType{}
// GetConfig returns the global or context sensitive config
func GetConfig(ctx context.Context) *ConfigInfo {
if ctx == nil {
return globalConfig
}
c := ctx.Value(configContextKey)
if c == nil {
return globalConfig
}
return c.(*ConfigInfo)
}
// AddConfig makes a shallow copy of the config found in ctx, attaches
// it to a new context, and returns the new context together with the
// mutable copy.
func AddConfig(ctx context.Context) (context.Context, *ConfigInfo) {
c := GetConfig(ctx)
cCopy := new(ConfigInfo)
*cCopy = *c
newCtx := context.WithValue(ctx, configContextKey, cCopy)
return newCtx, cCopy
}
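Together these give call sites a copy-on-write view of the config: read with GetConfig, branch with AddConfig. A minimal sketch of the intended pattern (withLowRetries is a hypothetical helper, not part of this commit):

    func withLowRetries(ctx context.Context) context.Context {
        newCtx, ci := fs.AddConfig(ctx) // shallow copy attached to newCtx
        ci.LowLevelRetries = 1          // mutation is scoped to newCtx
        return newCtx
    }

Callers passing the returned context down see the reduced retry count; everyone else, including fs.GetConfig(context.Background()), still sees the original settings.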
// ConfigToEnv converts a config section and name, e.g. ("myremote", // ConfigToEnv converts a config section and name, e.g. ("myremote",
// "ignore-size") into an environment name // "ignore-size") into an environment name
// "RCLONE_CONFIG_MYREMOTE_IGNORE_SIZE" // "RCLONE_CONFIG_MYREMOTE_IGNORE_SIZE"

View File

@ -236,7 +236,7 @@ func LoadConfig(ctx context.Context) {
accounting.StartTokenTicker(ctx) accounting.StartTokenTicker(ctx)
// Start the transactions per second limiter // Start the transactions per second limiter
fshttp.StartHTTPTokenBucket() fshttp.StartHTTPTokenBucket(ctx)
} }
var errorConfigFileNotFound = errors.New("config file not found") var errorConfigFileNotFound = errors.New("config file not found")
@ -244,6 +244,8 @@ var errorConfigFileNotFound = errors.New("config file not found")
// loadConfigFile will load a config file, and // loadConfigFile will load a config file, and
// automatically decrypt it. // automatically decrypt it.
func loadConfigFile() (*goconfig.ConfigFile, error) { func loadConfigFile() (*goconfig.ConfigFile, error) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
var usingPasswordCommand bool var usingPasswordCommand bool
b, err := ioutil.ReadFile(ConfigPath) b, err := ioutil.ReadFile(ConfigPath)
@ -278,11 +280,11 @@ func loadConfigFile() (*goconfig.ConfigFile, error) {
} }
if len(configKey) == 0 { if len(configKey) == 0 {
if len(fs.Config.PasswordCommand) != 0 { if len(ci.PasswordCommand) != 0 {
var stdout bytes.Buffer var stdout bytes.Buffer
var stderr bytes.Buffer var stderr bytes.Buffer
cmd := exec.Command(fs.Config.PasswordCommand[0], fs.Config.PasswordCommand[1:]...) cmd := exec.Command(ci.PasswordCommand[0], ci.PasswordCommand[1:]...)
cmd.Stdout = &stdout cmd.Stdout = &stdout
cmd.Stderr = &stderr cmd.Stderr = &stderr
@ -358,7 +360,7 @@ func loadConfigFile() (*goconfig.ConfigFile, error) {
if usingPasswordCommand { if usingPasswordCommand {
return nil, errors.New("using --password-command derived password, unable to decrypt configuration") return nil, errors.New("using --password-command derived password, unable to decrypt configuration")
} }
if !fs.Config.AskPassword { if !ci.AskPassword {
return nil, errors.New("unable to decrypt configuration and not allowed to ask for password - set RCLONE_CONFIG_PASS to your configuration password") return nil, errors.New("unable to decrypt configuration and not allowed to ask for password - set RCLONE_CONFIG_PASS to your configuration password")
} }
getConfigPassword("Enter configuration password:") getConfigPassword("Enter configuration password:")
@ -600,15 +602,17 @@ func saveConfig() error {
// SaveConfig calls saveConfig, which saves the configuration file. // SaveConfig calls saveConfig, which saves the configuration file.
// If saveConfig returns an error, it retries after a short sleep. // If saveConfig returns an error, it retries after a short sleep.
func SaveConfig() { func SaveConfig() {
ctx := context.Background()
ci := fs.GetConfig(ctx)
var err error var err error
for i := 0; i < fs.Config.LowLevelRetries+1; i++ { for i := 0; i < ci.LowLevelRetries+1; i++ {
if err = saveConfig(); err == nil { if err = saveConfig(); err == nil {
return return
} }
waitingTimeMs := mathrand.Intn(1000) waitingTimeMs := mathrand.Intn(1000)
time.Sleep(time.Duration(waitingTimeMs) * time.Millisecond) time.Sleep(time.Duration(waitingTimeMs) * time.Millisecond)
} }
log.Fatalf("Failed to save config after %d tries: %v", fs.Config.LowLevelRetries, err) log.Fatalf("Failed to save config after %d tries: %v", ci.LowLevelRetries, err)
return return
} }
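The loop makes ci.LowLevelRetries+1 attempts, e.g. 11 with the default of 10, each separated by a random sleep of up to one second.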
@ -746,7 +750,8 @@ func Confirm(Default bool) bool {
// that, but if it isn't set then it will return the Default value // that, but if it isn't set then it will return the Default value
// passed in // passed in
func ConfirmWithConfig(ctx context.Context, m configmap.Getter, configName string, Default bool) bool { func ConfirmWithConfig(ctx context.Context, m configmap.Getter, configName string, Default bool) bool {
if fs.Config.AutoConfirm { ci := fs.GetConfig(ctx)
if ci.AutoConfirm {
configString, ok := m.Get(configName) configString, ok := m.Get(configName)
if ok { if ok {
configValue, err := strconv.ParseBool(configString) configValue, err := strconv.ParseBool(configString)
@ -897,12 +902,12 @@ func MustFindByName(name string) *fs.RegInfo {
} }
// RemoteConfig runs the config helper for the remote if needed // RemoteConfig runs the config helper for the remote if needed
func RemoteConfig(name string) { func RemoteConfig(ctx context.Context, name string) {
fmt.Printf("Remote config\n") fmt.Printf("Remote config\n")
f := MustFindByName(name) f := MustFindByName(name)
if f.Config != nil { if f.Config != nil {
m := fs.ConfigMap(f, name) m := fs.ConfigMap(f, name)
f.Config(context.Background(), name, m) f.Config(ctx, name, m)
} }
} }
@ -1023,13 +1028,11 @@ func ChooseOption(o *fs.Option, name string) string {
return in return in
} }
// Suppress the confirm prompts and return a function to undo that // Suppress the confirm prompts by altering the context config
func suppressConfirm(ctx context.Context) func() { func suppressConfirm(ctx context.Context) context.Context {
old := fs.Config.AutoConfirm newCtx, ci := fs.AddConfig(ctx)
fs.Config.AutoConfirm = true ci.AutoConfirm = true
return func() { return newCtx
fs.Config.AutoConfirm = old
}
} }
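Returning a derived context rather than a restore closure scopes the suppression naturally: only code handed the new context auto-confirms, the caller's original context is untouched, and there is nothing to undo, which is why the call sites below drop the defer.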
// UpdateRemote adds the keyValues passed in to the remote of name. // UpdateRemote adds the keyValues passed in to the remote of name.
@ -1042,7 +1045,7 @@ func UpdateRemote(ctx context.Context, name string, keyValues rc.Params, doObscu
if err != nil { if err != nil {
return err return err
} }
defer suppressConfirm(ctx)() ctx = suppressConfirm(ctx)
// Work out which options need to be obscured // Work out which options need to be obscured
needsObscure := map[string]struct{}{} needsObscure := map[string]struct{}{}
@ -1079,7 +1082,7 @@ func UpdateRemote(ctx context.Context, name string, keyValues rc.Params, doObscu
} }
getConfigData().SetValue(name, k, vStr) getConfigData().SetValue(name, k, vStr)
} }
RemoteConfig(name) RemoteConfig(ctx, name)
SaveConfig() SaveConfig()
return nil return nil
} }
@ -1103,11 +1106,11 @@ func CreateRemote(ctx context.Context, name string, provider string, keyValues r
// PasswordRemote adds the keyValues passed in to the remote of name. // PasswordRemote adds the keyValues passed in to the remote of name.
// keyValues should be key, value pairs. // keyValues should be key, value pairs.
func PasswordRemote(ctx context.Context, name string, keyValues rc.Params) error { func PasswordRemote(ctx context.Context, name string, keyValues rc.Params) error {
ctx = suppressConfirm(ctx)
err := fspath.CheckConfigName(name) err := fspath.CheckConfigName(name)
if err != nil { if err != nil {
return err return err
} }
defer suppressConfirm(ctx)()
for k, v := range keyValues { for k, v := range keyValues {
keyValues[k] = obscure.MustObscure(fmt.Sprint(v)) keyValues[k] = obscure.MustObscure(fmt.Sprint(v))
} }
@ -1206,7 +1209,7 @@ func editOptions(ri *fs.RegInfo, name string, isNew bool) {
} }
// NewRemote make a new remote from its name // NewRemote make a new remote from its name
func NewRemote(name string) { func NewRemote(ctx context.Context, name string) {
var ( var (
newType string newType string
ri *fs.RegInfo ri *fs.RegInfo
@ -1226,16 +1229,16 @@ func NewRemote(name string) {
getConfigData().SetValue(name, "type", newType) getConfigData().SetValue(name, "type", newType)
editOptions(ri, name, true) editOptions(ri, name, true)
RemoteConfig(name) RemoteConfig(ctx, name)
if OkRemote(name) { if OkRemote(name) {
SaveConfig() SaveConfig()
return return
} }
EditRemote(ri, name) EditRemote(ctx, ri, name)
} }
// EditRemote gets the user to edit a remote // EditRemote gets the user to edit a remote
func EditRemote(ri *fs.RegInfo, name string) { func EditRemote(ctx context.Context, ri *fs.RegInfo, name string) {
ShowRemote(name) ShowRemote(name)
fmt.Printf("Edit remote\n") fmt.Printf("Edit remote\n")
for { for {
@ -1245,7 +1248,7 @@ func EditRemote(ri *fs.RegInfo, name string) {
} }
} }
SaveConfig() SaveConfig()
RemoteConfig(name) RemoteConfig(ctx, name)
} }
// DeleteRemote gets the user to delete a remote // DeleteRemote gets the user to delete a remote
@ -1307,7 +1310,7 @@ func ShowConfig() {
} }
// EditConfig edits the config file interactively // EditConfig edits the config file interactively
func EditConfig() { func EditConfig(ctx context.Context) {
for { for {
haveRemotes := len(getConfigData().GetSectionList()) != 0 haveRemotes := len(getConfigData().GetSectionList()) != 0
what := []string{"eEdit existing remote", "nNew remote", "dDelete remote", "rRename remote", "cCopy remote", "sSet configuration password", "qQuit config"} what := []string{"eEdit existing remote", "nNew remote", "dDelete remote", "rRename remote", "cCopy remote", "sSet configuration password", "qQuit config"}
@ -1324,9 +1327,9 @@ func EditConfig() {
case 'e': case 'e':
name := ChooseRemote() name := ChooseRemote()
fs := MustFindByName(name) fs := MustFindByName(name)
EditRemote(fs, name) EditRemote(ctx, fs, name)
case 'n': case 'n':
NewRemote(NewRemoteName()) NewRemote(ctx, NewRemoteName())
case 'd': case 'd':
name := ChooseRemote() name := ChooseRemote()
DeleteRemote(name) DeleteRemote(name)
@ -1388,7 +1391,7 @@ func SetPassword() {
// rclone authorize "fs name" // rclone authorize "fs name"
// rclone authorize "fs name" "client id" "client secret" // rclone authorize "fs name" "client id" "client secret"
func Authorize(ctx context.Context, args []string, noAutoBrowser bool) { func Authorize(ctx context.Context, args []string, noAutoBrowser bool) {
defer suppressConfirm(ctx)() ctx = suppressConfirm(ctx)
switch len(args) { switch len(args) {
case 1, 3: case 1, 3:
default: default:

View File

@ -17,6 +17,7 @@ import (
func testConfigFile(t *testing.T, configFileName string) func() { func testConfigFile(t *testing.T, configFileName string) func() {
ctx := context.Background() ctx := context.Background()
ci := fs.GetConfig(ctx)
configKey = nil // reset password configKey = nil // reset password
_ = os.Unsetenv("_RCLONE_CONFIG_KEY_FILE") _ = os.Unsetenv("_RCLONE_CONFIG_KEY_FILE")
_ = os.Unsetenv("RCLONE_CONFIG_PASS") _ = os.Unsetenv("RCLONE_CONFIG_PASS")
@ -29,13 +30,13 @@ func testConfigFile(t *testing.T, configFileName string) func() {
// temporarily adapt configuration // temporarily adapt configuration
oldOsStdout := os.Stdout oldOsStdout := os.Stdout
oldConfigPath := ConfigPath oldConfigPath := ConfigPath
oldConfig := fs.Config oldConfig := *ci
oldConfigFile := configFile oldConfigFile := configFile
oldReadLine := ReadLine oldReadLine := ReadLine
oldPassword := Password oldPassword := Password
os.Stdout = nil os.Stdout = nil
ConfigPath = path ConfigPath = path
fs.Config = &fs.ConfigInfo{} ci = &fs.ConfigInfo{}
configFile = nil configFile = nil
LoadConfig(ctx) LoadConfig(ctx)
@ -67,7 +68,7 @@ func testConfigFile(t *testing.T, configFileName string) func() {
ConfigPath = oldConfigPath ConfigPath = oldConfigPath
ReadLine = oldReadLine ReadLine = oldReadLine
Password = oldPassword Password = oldPassword
fs.Config = oldConfig *ci = oldConfig
configFile = oldConfigFile configFile = oldConfigFile
_ = os.Unsetenv("_RCLONE_CONFIG_KEY_FILE") _ = os.Unsetenv("_RCLONE_CONFIG_KEY_FILE")
@ -87,6 +88,7 @@ func makeReadLine(answers []string) func() string {
func TestCRUD(t *testing.T) { func TestCRUD(t *testing.T) {
defer testConfigFile(t, "crud.conf")() defer testConfigFile(t, "crud.conf")()
ctx := context.Background()
// script for creating remote // script for creating remote
ReadLine = makeReadLine([]string{ ReadLine = makeReadLine([]string{
@ -97,7 +99,7 @@ func TestCRUD(t *testing.T) {
"secret", // repeat "secret", // repeat
"y", // looks good, save "y", // looks good, save
}) })
NewRemote("test") NewRemote(ctx, "test")
assert.Equal(t, []string{"test"}, configFile.GetSectionList()) assert.Equal(t, []string{"test"}, configFile.GetSectionList())
assert.Equal(t, "config_test_remote", FileGet("test", "type")) assert.Equal(t, "config_test_remote", FileGet("test", "type"))
@ -124,6 +126,7 @@ func TestCRUD(t *testing.T) {
func TestChooseOption(t *testing.T) { func TestChooseOption(t *testing.T) {
defer testConfigFile(t, "crud.conf")() defer testConfigFile(t, "crud.conf")()
ctx := context.Background()
// script for creating remote // script for creating remote
ReadLine = makeReadLine([]string{ ReadLine = makeReadLine([]string{
@ -139,7 +142,7 @@ func TestChooseOption(t *testing.T) {
assert.Equal(t, 1024, bits) assert.Equal(t, 1024, bits)
return "not very random password", nil return "not very random password", nil
} }
NewRemote("test") NewRemote(ctx, "test")
assert.Equal(t, "false", FileGet("test", "bool")) assert.Equal(t, "false", FileGet("test", "bool"))
assert.Equal(t, "not very random password", obscure.MustReveal(FileGet("test", "pass"))) assert.Equal(t, "not very random password", obscure.MustReveal(FileGet("test", "pass")))
@ -151,7 +154,7 @@ func TestChooseOption(t *testing.T) {
"n", // not required "n", // not required
"y", // looks good, save "y", // looks good, save
}) })
NewRemote("test") NewRemote(ctx, "test")
assert.Equal(t, "true", FileGet("test", "bool")) assert.Equal(t, "true", FileGet("test", "bool"))
assert.Equal(t, "", FileGet("test", "pass")) assert.Equal(t, "", FileGet("test", "pass"))
@ -159,6 +162,7 @@ func TestChooseOption(t *testing.T) {
func TestNewRemoteName(t *testing.T) { func TestNewRemoteName(t *testing.T) {
defer testConfigFile(t, "crud.conf")() defer testConfigFile(t, "crud.conf")()
ctx := context.Background()
// script for creating remote // script for creating remote
ReadLine = makeReadLine([]string{ ReadLine = makeReadLine([]string{
@ -167,7 +171,7 @@ func TestNewRemoteName(t *testing.T) {
"n", // not required "n", // not required
"y", // looks good, save "y", // looks good, save
}) })
NewRemote("test") NewRemote(ctx, "test")
ReadLine = makeReadLine([]string{ ReadLine = makeReadLine([]string{
"test", // already exists "test", // already exists
@ -293,16 +297,18 @@ func TestConfigLoadEncrypted(t *testing.T) {
} }
func TestConfigLoadEncryptedWithValidPassCommand(t *testing.T) { func TestConfigLoadEncryptedWithValidPassCommand(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
oldConfigPath := ConfigPath oldConfigPath := ConfigPath
oldConfig := fs.Config oldConfig := *ci
ConfigPath = "./testdata/encrypted.conf" ConfigPath = "./testdata/encrypted.conf"
// using fs.Config.PasswordCommand, correct password // using ci.PasswordCommand, correct password
fs.Config.PasswordCommand = fs.SpaceSepList{"echo", "asdf"} ci.PasswordCommand = fs.SpaceSepList{"echo", "asdf"}
defer func() { defer func() {
ConfigPath = oldConfigPath ConfigPath = oldConfigPath
configKey = nil // reset password configKey = nil // reset password
fs.Config = oldConfig *ci = oldConfig
fs.Config.PasswordCommand = nil ci.PasswordCommand = nil
}() }()
configKey = nil // reset password configKey = nil // reset password
@ -320,16 +326,18 @@ func TestConfigLoadEncryptedWithValidPassCommand(t *testing.T) {
} }
func TestConfigLoadEncryptedWithInvalidPassCommand(t *testing.T) { func TestConfigLoadEncryptedWithInvalidPassCommand(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
oldConfigPath := ConfigPath oldConfigPath := ConfigPath
oldConfig := fs.Config oldConfig := *ci
ConfigPath = "./testdata/encrypted.conf" ConfigPath = "./testdata/encrypted.conf"
// using fs.Config.PasswordCommand, incorrect password // using ci.PasswordCommand, incorrect password
fs.Config.PasswordCommand = fs.SpaceSepList{"echo", "asdf-blurfl"} ci.PasswordCommand = fs.SpaceSepList{"echo", "asdf-blurfl"}
defer func() { defer func() {
ConfigPath = oldConfigPath ConfigPath = oldConfigPath
configKey = nil // reset password configKey = nil // reset password
fs.Config = oldConfig *ci = oldConfig
fs.Config.PasswordCommand = nil ci.PasswordCommand = nil
}() }()
configKey = nil // reset password configKey = nil // reset password

View File

@ -35,96 +35,96 @@ var (
) )
// AddFlags adds the non-filesystem-specific flags to the command // AddFlags adds the non-filesystem-specific flags to the command
func AddFlags(flagSet *pflag.FlagSet) { func AddFlags(ci *fs.ConfigInfo, flagSet *pflag.FlagSet) {
rc.AddOption("main", fs.Config) rc.AddOption("main", ci)
// NB defaults which aren't the zero for the type should be set in fs/config.go NewConfig // NB defaults which aren't the zero for the type should be set in fs/config.go NewConfig
flags.CountVarP(flagSet, &verbose, "verbose", "v", "Print lots more stuff (repeat for more)") flags.CountVarP(flagSet, &verbose, "verbose", "v", "Print lots more stuff (repeat for more)")
flags.BoolVarP(flagSet, &quiet, "quiet", "q", false, "Print as little stuff as possible") flags.BoolVarP(flagSet, &quiet, "quiet", "q", false, "Print as little stuff as possible")
flags.DurationVarP(flagSet, &fs.Config.ModifyWindow, "modify-window", "", fs.Config.ModifyWindow, "Max time diff to be considered the same") flags.DurationVarP(flagSet, &ci.ModifyWindow, "modify-window", "", ci.ModifyWindow, "Max time diff to be considered the same")
flags.IntVarP(flagSet, &fs.Config.Checkers, "checkers", "", fs.Config.Checkers, "Number of checkers to run in parallel.") flags.IntVarP(flagSet, &ci.Checkers, "checkers", "", ci.Checkers, "Number of checkers to run in parallel.")
flags.IntVarP(flagSet, &fs.Config.Transfers, "transfers", "", fs.Config.Transfers, "Number of file transfers to run in parallel.") flags.IntVarP(flagSet, &ci.Transfers, "transfers", "", ci.Transfers, "Number of file transfers to run in parallel.")
flags.StringVarP(flagSet, &config.ConfigPath, "config", "", config.ConfigPath, "Config file.") flags.StringVarP(flagSet, &config.ConfigPath, "config", "", config.ConfigPath, "Config file.")
flags.StringVarP(flagSet, &config.CacheDir, "cache-dir", "", config.CacheDir, "Directory rclone will use for caching.") flags.StringVarP(flagSet, &config.CacheDir, "cache-dir", "", config.CacheDir, "Directory rclone will use for caching.")
flags.BoolVarP(flagSet, &fs.Config.CheckSum, "checksum", "c", fs.Config.CheckSum, "Skip based on checksum (if available) & size, not mod-time & size") flags.BoolVarP(flagSet, &ci.CheckSum, "checksum", "c", ci.CheckSum, "Skip based on checksum (if available) & size, not mod-time & size")
flags.BoolVarP(flagSet, &fs.Config.SizeOnly, "size-only", "", fs.Config.SizeOnly, "Skip based on size only, not mod-time or checksum") flags.BoolVarP(flagSet, &ci.SizeOnly, "size-only", "", ci.SizeOnly, "Skip based on size only, not mod-time or checksum")
flags.BoolVarP(flagSet, &fs.Config.IgnoreTimes, "ignore-times", "I", fs.Config.IgnoreTimes, "Don't skip files that match size and time - transfer all files") flags.BoolVarP(flagSet, &ci.IgnoreTimes, "ignore-times", "I", ci.IgnoreTimes, "Don't skip files that match size and time - transfer all files")
flags.BoolVarP(flagSet, &fs.Config.IgnoreExisting, "ignore-existing", "", fs.Config.IgnoreExisting, "Skip all files that exist on destination") flags.BoolVarP(flagSet, &ci.IgnoreExisting, "ignore-existing", "", ci.IgnoreExisting, "Skip all files that exist on destination")
flags.BoolVarP(flagSet, &fs.Config.IgnoreErrors, "ignore-errors", "", fs.Config.IgnoreErrors, "delete even if there are I/O errors") flags.BoolVarP(flagSet, &ci.IgnoreErrors, "ignore-errors", "", ci.IgnoreErrors, "delete even if there are I/O errors")
flags.BoolVarP(flagSet, &fs.Config.DryRun, "dry-run", "n", fs.Config.DryRun, "Do a trial run with no permanent changes") flags.BoolVarP(flagSet, &ci.DryRun, "dry-run", "n", ci.DryRun, "Do a trial run with no permanent changes")
flags.BoolVarP(flagSet, &fs.Config.Interactive, "interactive", "i", fs.Config.Interactive, "Enable interactive mode") flags.BoolVarP(flagSet, &ci.Interactive, "interactive", "i", ci.Interactive, "Enable interactive mode")
flags.DurationVarP(flagSet, &fs.Config.ConnectTimeout, "contimeout", "", fs.Config.ConnectTimeout, "Connect timeout") flags.DurationVarP(flagSet, &ci.ConnectTimeout, "contimeout", "", ci.ConnectTimeout, "Connect timeout")
flags.DurationVarP(flagSet, &fs.Config.Timeout, "timeout", "", fs.Config.Timeout, "IO idle timeout") flags.DurationVarP(flagSet, &ci.Timeout, "timeout", "", ci.Timeout, "IO idle timeout")
flags.DurationVarP(flagSet, &fs.Config.ExpectContinueTimeout, "expect-continue-timeout", "", fs.Config.ExpectContinueTimeout, "Timeout when using expect / 100-continue in HTTP") flags.DurationVarP(flagSet, &ci.ExpectContinueTimeout, "expect-continue-timeout", "", ci.ExpectContinueTimeout, "Timeout when using expect / 100-continue in HTTP")
flags.BoolVarP(flagSet, &dumpHeaders, "dump-headers", "", false, "Dump HTTP headers - may contain sensitive info") flags.BoolVarP(flagSet, &dumpHeaders, "dump-headers", "", false, "Dump HTTP headers - may contain sensitive info")
flags.BoolVarP(flagSet, &dumpBodies, "dump-bodies", "", false, "Dump HTTP headers and bodies - may contain sensitive info") flags.BoolVarP(flagSet, &dumpBodies, "dump-bodies", "", false, "Dump HTTP headers and bodies - may contain sensitive info")
flags.BoolVarP(flagSet, &fs.Config.InsecureSkipVerify, "no-check-certificate", "", fs.Config.InsecureSkipVerify, "Do not verify the server SSL certificate. Insecure.") flags.BoolVarP(flagSet, &ci.InsecureSkipVerify, "no-check-certificate", "", ci.InsecureSkipVerify, "Do not verify the server SSL certificate. Insecure.")
flags.BoolVarP(flagSet, &fs.Config.AskPassword, "ask-password", "", fs.Config.AskPassword, "Allow prompt for password for encrypted configuration.") flags.BoolVarP(flagSet, &ci.AskPassword, "ask-password", "", ci.AskPassword, "Allow prompt for password for encrypted configuration.")
flags.FVarP(flagSet, &fs.Config.PasswordCommand, "password-command", "", "Command for supplying password for encrypted configuration.") flags.FVarP(flagSet, &ci.PasswordCommand, "password-command", "", "Command for supplying password for encrypted configuration.")
flags.BoolVarP(flagSet, &deleteBefore, "delete-before", "", false, "When synchronizing, delete files on destination before transferring") flags.BoolVarP(flagSet, &deleteBefore, "delete-before", "", false, "When synchronizing, delete files on destination before transferring")
flags.BoolVarP(flagSet, &deleteDuring, "delete-during", "", false, "When synchronizing, delete files during transfer") flags.BoolVarP(flagSet, &deleteDuring, "delete-during", "", false, "When synchronizing, delete files during transfer")
flags.BoolVarP(flagSet, &deleteAfter, "delete-after", "", false, "When synchronizing, delete files on destination after transferring (default)") flags.BoolVarP(flagSet, &deleteAfter, "delete-after", "", false, "When synchronizing, delete files on destination after transferring (default)")
flags.Int64VarP(flagSet, &fs.Config.MaxDelete, "max-delete", "", -1, "When synchronizing, limit the number of deletes") flags.Int64VarP(flagSet, &ci.MaxDelete, "max-delete", "", -1, "When synchronizing, limit the number of deletes")
flags.BoolVarP(flagSet, &fs.Config.TrackRenames, "track-renames", "", fs.Config.TrackRenames, "When synchronizing, track file renames and do a server-side move if possible") flags.BoolVarP(flagSet, &ci.TrackRenames, "track-renames", "", ci.TrackRenames, "When synchronizing, track file renames and do a server-side move if possible")
flags.StringVarP(flagSet, &fs.Config.TrackRenamesStrategy, "track-renames-strategy", "", fs.Config.TrackRenamesStrategy, "Strategies to use when synchronizing using track-renames hash|modtime|leaf") flags.StringVarP(flagSet, &ci.TrackRenamesStrategy, "track-renames-strategy", "", ci.TrackRenamesStrategy, "Strategies to use when synchronizing using track-renames hash|modtime|leaf")
flags.IntVarP(flagSet, &fs.Config.LowLevelRetries, "low-level-retries", "", fs.Config.LowLevelRetries, "Number of low level retries to do.") flags.IntVarP(flagSet, &ci.LowLevelRetries, "low-level-retries", "", ci.LowLevelRetries, "Number of low level retries to do.")
flags.BoolVarP(flagSet, &fs.Config.UpdateOlder, "update", "u", fs.Config.UpdateOlder, "Skip files that are newer on the destination.") flags.BoolVarP(flagSet, &ci.UpdateOlder, "update", "u", ci.UpdateOlder, "Skip files that are newer on the destination.")
flags.BoolVarP(flagSet, &fs.Config.UseServerModTime, "use-server-modtime", "", fs.Config.UseServerModTime, "Use server modified time instead of object metadata") flags.BoolVarP(flagSet, &ci.UseServerModTime, "use-server-modtime", "", ci.UseServerModTime, "Use server modified time instead of object metadata")
flags.BoolVarP(flagSet, &fs.Config.NoGzip, "no-gzip-encoding", "", fs.Config.NoGzip, "Don't set Accept-Encoding: gzip.") flags.BoolVarP(flagSet, &ci.NoGzip, "no-gzip-encoding", "", ci.NoGzip, "Don't set Accept-Encoding: gzip.")
flags.IntVarP(flagSet, &fs.Config.MaxDepth, "max-depth", "", fs.Config.MaxDepth, "If set limits the recursion depth to this.") flags.IntVarP(flagSet, &ci.MaxDepth, "max-depth", "", ci.MaxDepth, "If set limits the recursion depth to this.")
flags.BoolVarP(flagSet, &fs.Config.IgnoreSize, "ignore-size", "", false, "Ignore size when skipping use mod-time or checksum.") flags.BoolVarP(flagSet, &ci.IgnoreSize, "ignore-size", "", false, "Ignore size when skipping use mod-time or checksum.")
flags.BoolVarP(flagSet, &fs.Config.IgnoreChecksum, "ignore-checksum", "", fs.Config.IgnoreChecksum, "Skip post copy check of checksums.") flags.BoolVarP(flagSet, &ci.IgnoreChecksum, "ignore-checksum", "", ci.IgnoreChecksum, "Skip post copy check of checksums.")
flags.BoolVarP(flagSet, &fs.Config.IgnoreCaseSync, "ignore-case-sync", "", fs.Config.IgnoreCaseSync, "Ignore case when synchronizing") flags.BoolVarP(flagSet, &ci.IgnoreCaseSync, "ignore-case-sync", "", ci.IgnoreCaseSync, "Ignore case when synchronizing")
flags.BoolVarP(flagSet, &fs.Config.NoTraverse, "no-traverse", "", fs.Config.NoTraverse, "Don't traverse destination file system on copy.") flags.BoolVarP(flagSet, &ci.NoTraverse, "no-traverse", "", ci.NoTraverse, "Don't traverse destination file system on copy.")
flags.BoolVarP(flagSet, &fs.Config.CheckFirst, "check-first", "", fs.Config.CheckFirst, "Do all the checks before starting transfers.") flags.BoolVarP(flagSet, &ci.CheckFirst, "check-first", "", ci.CheckFirst, "Do all the checks before starting transfers.")
flags.BoolVarP(flagSet, &fs.Config.NoCheckDest, "no-check-dest", "", fs.Config.NoCheckDest, "Don't check the destination, copy regardless.") flags.BoolVarP(flagSet, &ci.NoCheckDest, "no-check-dest", "", ci.NoCheckDest, "Don't check the destination, copy regardless.")
flags.BoolVarP(flagSet, &fs.Config.NoUnicodeNormalization, "no-unicode-normalization", "", fs.Config.NoUnicodeNormalization, "Don't normalize unicode characters in filenames.") flags.BoolVarP(flagSet, &ci.NoUnicodeNormalization, "no-unicode-normalization", "", ci.NoUnicodeNormalization, "Don't normalize unicode characters in filenames.")
flags.BoolVarP(flagSet, &fs.Config.NoUpdateModTime, "no-update-modtime", "", fs.Config.NoUpdateModTime, "Don't update destination mod-time if files identical.") flags.BoolVarP(flagSet, &ci.NoUpdateModTime, "no-update-modtime", "", ci.NoUpdateModTime, "Don't update destination mod-time if files identical.")
flags.StringVarP(flagSet, &fs.Config.CompareDest, "compare-dest", "", fs.Config.CompareDest, "Include additional server-side path during comparison.") flags.StringVarP(flagSet, &ci.CompareDest, "compare-dest", "", ci.CompareDest, "Include additional server-side path during comparison.")
flags.StringVarP(flagSet, &fs.Config.CopyDest, "copy-dest", "", fs.Config.CopyDest, "Implies --compare-dest but also copies files from path into destination.") flags.StringVarP(flagSet, &ci.CopyDest, "copy-dest", "", ci.CopyDest, "Implies --compare-dest but also copies files from path into destination.")
flags.StringVarP(flagSet, &fs.Config.BackupDir, "backup-dir", "", fs.Config.BackupDir, "Make backups into hierarchy based in DIR.") flags.StringVarP(flagSet, &ci.BackupDir, "backup-dir", "", ci.BackupDir, "Make backups into hierarchy based in DIR.")
flags.StringVarP(flagSet, &fs.Config.Suffix, "suffix", "", fs.Config.Suffix, "Suffix to add to changed files.") flags.StringVarP(flagSet, &ci.Suffix, "suffix", "", ci.Suffix, "Suffix to add to changed files.")
flags.BoolVarP(flagSet, &fs.Config.SuffixKeepExtension, "suffix-keep-extension", "", fs.Config.SuffixKeepExtension, "Preserve the extension when using --suffix.") flags.BoolVarP(flagSet, &ci.SuffixKeepExtension, "suffix-keep-extension", "", ci.SuffixKeepExtension, "Preserve the extension when using --suffix.")
flags.BoolVarP(flagSet, &fs.Config.UseListR, "fast-list", "", fs.Config.UseListR, "Use recursive list if available. Uses more memory but fewer transactions.") flags.BoolVarP(flagSet, &ci.UseListR, "fast-list", "", ci.UseListR, "Use recursive list if available. Uses more memory but fewer transactions.")
flags.Float64VarP(flagSet, &fs.Config.TPSLimit, "tpslimit", "", fs.Config.TPSLimit, "Limit HTTP transactions per second to this.") flags.Float64VarP(flagSet, &ci.TPSLimit, "tpslimit", "", ci.TPSLimit, "Limit HTTP transactions per second to this.")
flags.IntVarP(flagSet, &fs.Config.TPSLimitBurst, "tpslimit-burst", "", fs.Config.TPSLimitBurst, "Max burst of transactions for --tpslimit.") flags.IntVarP(flagSet, &ci.TPSLimitBurst, "tpslimit-burst", "", ci.TPSLimitBurst, "Max burst of transactions for --tpslimit.")
flags.StringVarP(flagSet, &bindAddr, "bind", "", "", "Local address to bind to for outgoing connections, IPv4, IPv6 or name.") flags.StringVarP(flagSet, &bindAddr, "bind", "", "", "Local address to bind to for outgoing connections, IPv4, IPv6 or name.")
flags.StringVarP(flagSet, &disableFeatures, "disable", "", "", "Disable a comma separated list of features. Use help to see a list.") flags.StringVarP(flagSet, &disableFeatures, "disable", "", "", "Disable a comma separated list of features. Use help to see a list.")
flags.StringVarP(flagSet, &fs.Config.UserAgent, "user-agent", "", fs.Config.UserAgent, "Set the user-agent to a specified string. The default is rclone/ version") flags.StringVarP(flagSet, &ci.UserAgent, "user-agent", "", ci.UserAgent, "Set the user-agent to a specified string. The default is rclone/ version")
flags.BoolVarP(flagSet, &fs.Config.Immutable, "immutable", "", fs.Config.Immutable, "Do not modify files. Fail if existing files have been modified.") flags.BoolVarP(flagSet, &ci.Immutable, "immutable", "", ci.Immutable, "Do not modify files. Fail if existing files have been modified.")
flags.BoolVarP(flagSet, &fs.Config.AutoConfirm, "auto-confirm", "", fs.Config.AutoConfirm, "If enabled, do not request console confirmation.") flags.BoolVarP(flagSet, &ci.AutoConfirm, "auto-confirm", "", ci.AutoConfirm, "If enabled, do not request console confirmation.")
flags.IntVarP(flagSet, &fs.Config.StatsFileNameLength, "stats-file-name-length", "", fs.Config.StatsFileNameLength, "Max file name length in stats. 0 for no limit") flags.IntVarP(flagSet, &ci.StatsFileNameLength, "stats-file-name-length", "", ci.StatsFileNameLength, "Max file name length in stats. 0 for no limit")
flags.FVarP(flagSet, &fs.Config.LogLevel, "log-level", "", "Log level DEBUG|INFO|NOTICE|ERROR") flags.FVarP(flagSet, &ci.LogLevel, "log-level", "", "Log level DEBUG|INFO|NOTICE|ERROR")
flags.FVarP(flagSet, &fs.Config.StatsLogLevel, "stats-log-level", "", "Log level to show --stats output DEBUG|INFO|NOTICE|ERROR") flags.FVarP(flagSet, &ci.StatsLogLevel, "stats-log-level", "", "Log level to show --stats output DEBUG|INFO|NOTICE|ERROR")
flags.FVarP(flagSet, &fs.Config.BwLimit, "bwlimit", "", "Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.") flags.FVarP(flagSet, &ci.BwLimit, "bwlimit", "", "Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.")
flags.FVarP(flagSet, &fs.Config.BwLimitFile, "bwlimit-file", "", "Bandwidth limit per file in kBytes/s, or use suffix b|k|M|G or a full timetable.") flags.FVarP(flagSet, &ci.BwLimitFile, "bwlimit-file", "", "Bandwidth limit per file in kBytes/s, or use suffix b|k|M|G or a full timetable.")
flags.FVarP(flagSet, &fs.Config.BufferSize, "buffer-size", "", "In memory buffer size when reading files for each --transfer.") flags.FVarP(flagSet, &ci.BufferSize, "buffer-size", "", "In memory buffer size when reading files for each --transfer.")
flags.FVarP(flagSet, &fs.Config.StreamingUploadCutoff, "streaming-upload-cutoff", "", "Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends.") flags.FVarP(flagSet, &ci.StreamingUploadCutoff, "streaming-upload-cutoff", "", "Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends.")
flags.FVarP(flagSet, &fs.Config.Dump, "dump", "", "List of items to dump from: "+fs.DumpFlagsList) flags.FVarP(flagSet, &ci.Dump, "dump", "", "List of items to dump from: "+fs.DumpFlagsList)
flags.FVarP(flagSet, &fs.Config.MaxTransfer, "max-transfer", "", "Maximum size of data to transfer.") flags.FVarP(flagSet, &ci.MaxTransfer, "max-transfer", "", "Maximum size of data to transfer.")
flags.DurationVarP(flagSet, &fs.Config.MaxDuration, "max-duration", "", 0, "Maximum duration rclone will transfer data for.") flags.DurationVarP(flagSet, &ci.MaxDuration, "max-duration", "", 0, "Maximum duration rclone will transfer data for.")
flags.FVarP(flagSet, &fs.Config.CutoffMode, "cutoff-mode", "", "Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS") flags.FVarP(flagSet, &ci.CutoffMode, "cutoff-mode", "", "Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS")
flags.IntVarP(flagSet, &fs.Config.MaxBacklog, "max-backlog", "", fs.Config.MaxBacklog, "Maximum number of objects in sync or check backlog.") flags.IntVarP(flagSet, &ci.MaxBacklog, "max-backlog", "", ci.MaxBacklog, "Maximum number of objects in sync or check backlog.")
flags.IntVarP(flagSet, &fs.Config.MaxStatsGroups, "max-stats-groups", "", fs.Config.MaxStatsGroups, "Maximum number of stats groups to keep in memory. On max oldest is discarded.") flags.IntVarP(flagSet, &ci.MaxStatsGroups, "max-stats-groups", "", ci.MaxStatsGroups, "Maximum number of stats groups to keep in memory. On max oldest is discarded.")
flags.BoolVarP(flagSet, &fs.Config.StatsOneLine, "stats-one-line", "", fs.Config.StatsOneLine, "Make the stats fit on one line.") flags.BoolVarP(flagSet, &ci.StatsOneLine, "stats-one-line", "", ci.StatsOneLine, "Make the stats fit on one line.")
flags.BoolVarP(flagSet, &fs.Config.StatsOneLineDate, "stats-one-line-date", "", fs.Config.StatsOneLineDate, "Enables --stats-one-line and add current date/time prefix.") flags.BoolVarP(flagSet, &ci.StatsOneLineDate, "stats-one-line-date", "", ci.StatsOneLineDate, "Enables --stats-one-line and add current date/time prefix.")
flags.StringVarP(flagSet, &fs.Config.StatsOneLineDateFormat, "stats-one-line-date-format", "", fs.Config.StatsOneLineDateFormat, "Enables --stats-one-line-date and uses custom formatted date. Enclose date string in double quotes (\"). See https://golang.org/pkg/time/#Time.Format") flags.StringVarP(flagSet, &ci.StatsOneLineDateFormat, "stats-one-line-date-format", "", ci.StatsOneLineDateFormat, "Enables --stats-one-line-date and uses custom formatted date. Enclose date string in double quotes (\"). See https://golang.org/pkg/time/#Time.Format")
flags.BoolVarP(flagSet, &fs.Config.ErrorOnNoTransfer, "error-on-no-transfer", "", fs.Config.ErrorOnNoTransfer, "Sets exit code 9 if no files are transferred, useful in scripts") flags.BoolVarP(flagSet, &ci.ErrorOnNoTransfer, "error-on-no-transfer", "", ci.ErrorOnNoTransfer, "Sets exit code 9 if no files are transferred, useful in scripts")
flags.BoolVarP(flagSet, &fs.Config.Progress, "progress", "P", fs.Config.Progress, "Show progress during transfer.") flags.BoolVarP(flagSet, &ci.Progress, "progress", "P", ci.Progress, "Show progress during transfer.")
flags.BoolVarP(flagSet, &fs.Config.ProgressTerminalTitle, "progress-terminal-title", "", fs.Config.ProgressTerminalTitle, "Show progress on the terminal title. Requires -P/--progress.") flags.BoolVarP(flagSet, &ci.ProgressTerminalTitle, "progress-terminal-title", "", ci.ProgressTerminalTitle, "Show progress on the terminal title. Requires -P/--progress.")
flags.BoolVarP(flagSet, &fs.Config.Cookie, "use-cookies", "", fs.Config.Cookie, "Enable session cookiejar.") flags.BoolVarP(flagSet, &ci.Cookie, "use-cookies", "", ci.Cookie, "Enable session cookiejar.")
flags.BoolVarP(flagSet, &fs.Config.UseMmap, "use-mmap", "", fs.Config.UseMmap, "Use mmap allocator (see docs).") flags.BoolVarP(flagSet, &ci.UseMmap, "use-mmap", "", ci.UseMmap, "Use mmap allocator (see docs).")
flags.StringVarP(flagSet, &fs.Config.CaCert, "ca-cert", "", fs.Config.CaCert, "CA certificate used to verify servers") flags.StringVarP(flagSet, &ci.CaCert, "ca-cert", "", ci.CaCert, "CA certificate used to verify servers")
flags.StringVarP(flagSet, &fs.Config.ClientCert, "client-cert", "", fs.Config.ClientCert, "Client SSL certificate (PEM) for mutual TLS auth") flags.StringVarP(flagSet, &ci.ClientCert, "client-cert", "", ci.ClientCert, "Client SSL certificate (PEM) for mutual TLS auth")
flags.StringVarP(flagSet, &fs.Config.ClientKey, "client-key", "", fs.Config.ClientKey, "Client SSL private key (PEM) for mutual TLS auth") flags.StringVarP(flagSet, &ci.ClientKey, "client-key", "", ci.ClientKey, "Client SSL private key (PEM) for mutual TLS auth")
flags.FVarP(flagSet, &fs.Config.MultiThreadCutoff, "multi-thread-cutoff", "", "Use multi-thread downloads for files above this size.") flags.FVarP(flagSet, &ci.MultiThreadCutoff, "multi-thread-cutoff", "", "Use multi-thread downloads for files above this size.")
flags.IntVarP(flagSet, &fs.Config.MultiThreadStreams, "multi-thread-streams", "", fs.Config.MultiThreadStreams, "Max number of streams to use for multi-thread downloads.") flags.IntVarP(flagSet, &ci.MultiThreadStreams, "multi-thread-streams", "", ci.MultiThreadStreams, "Max number of streams to use for multi-thread downloads.")
flags.BoolVarP(flagSet, &fs.Config.UseJSONLog, "use-json-log", "", fs.Config.UseJSONLog, "Use json log format.") flags.BoolVarP(flagSet, &ci.UseJSONLog, "use-json-log", "", ci.UseJSONLog, "Use json log format.")
flags.StringVarP(flagSet, &fs.Config.OrderBy, "order-by", "", fs.Config.OrderBy, "Instructions on how to order the transfers, e.g. 'size,descending'") flags.StringVarP(flagSet, &ci.OrderBy, "order-by", "", ci.OrderBy, "Instructions on how to order the transfers, e.g. 'size,descending'")
flags.StringArrayVarP(flagSet, &uploadHeaders, "header-upload", "", nil, "Set HTTP header for upload transactions") flags.StringArrayVarP(flagSet, &uploadHeaders, "header-upload", "", nil, "Set HTTP header for upload transactions")
flags.StringArrayVarP(flagSet, &downloadHeaders, "header-download", "", nil, "Set HTTP header for download transactions") flags.StringArrayVarP(flagSet, &downloadHeaders, "header-download", "", nil, "Set HTTP header for download transactions")
flags.StringArrayVarP(flagSet, &headers, "header", "", nil, "Set HTTP header for all transactions") flags.StringArrayVarP(flagSet, &headers, "header", "", nil, "Set HTTP header for all transactions")
flags.BoolVarP(flagSet, &fs.Config.RefreshTimes, "refresh-times", "", fs.Config.RefreshTimes, "Refresh the modtime of remote files.") flags.BoolVarP(flagSet, &ci.RefreshTimes, "refresh-times", "", ci.RefreshTimes, "Refresh the modtime of remote files.")
flags.BoolVarP(flagSet, &fs.Config.LogSystemdSupport, "log-systemd", "", fs.Config.LogSystemdSupport, "Activate systemd integration for the logger.") flags.BoolVarP(flagSet, &ci.LogSystemdSupport, "log-systemd", "", ci.LogSystemdSupport, "Activate systemd integration for the logger.")
} }
// ParseHeaders converts the strings passed in via the header flags into HTTPOptions // ParseHeaders converts the strings passed in via the header flags into HTTPOptions
@ -145,17 +145,17 @@ func ParseHeaders(headers []string) []*fs.HTTPOption {
} }
// SetFlags converts any flags into config which weren't straightforward // SetFlags converts any flags into config which weren't straightforward
func SetFlags() { func SetFlags(ci *fs.ConfigInfo) {
if verbose >= 2 { if verbose >= 2 {
fs.Config.LogLevel = fs.LogLevelDebug ci.LogLevel = fs.LogLevelDebug
} else if verbose >= 1 { } else if verbose >= 1 {
fs.Config.LogLevel = fs.LogLevelInfo ci.LogLevel = fs.LogLevelInfo
} }
if quiet { if quiet {
if verbose > 0 { if verbose > 0 {
log.Fatalf("Can't set -v and -q") log.Fatalf("Can't set -v and -q")
} }
fs.Config.LogLevel = fs.LogLevelError ci.LogLevel = fs.LogLevelError
} }
logLevelFlag := pflag.Lookup("log-level") logLevelFlag := pflag.Lookup("log-level")
if logLevelFlag != nil && logLevelFlag.Changed { if logLevelFlag != nil && logLevelFlag.Changed {
@ -166,13 +166,13 @@ func SetFlags() {
log.Fatalf("Can't set -q and --log-level") log.Fatalf("Can't set -q and --log-level")
} }
} }
if fs.Config.UseJSONLog { if ci.UseJSONLog {
logrus.AddHook(fsLog.NewCallerHook()) logrus.AddHook(fsLog.NewCallerHook())
logrus.SetFormatter(&logrus.JSONFormatter{ logrus.SetFormatter(&logrus.JSONFormatter{
TimestampFormat: "2006-01-02T15:04:05.999999-07:00", TimestampFormat: "2006-01-02T15:04:05.999999-07:00",
}) })
logrus.SetLevel(logrus.DebugLevel) logrus.SetLevel(logrus.DebugLevel)
switch fs.Config.LogLevel { switch ci.LogLevel {
case fs.LogLevelEmergency, fs.LogLevelAlert: case fs.LogLevelEmergency, fs.LogLevelAlert:
logrus.SetLevel(logrus.PanicLevel) logrus.SetLevel(logrus.PanicLevel)
case fs.LogLevelCritical: case fs.LogLevelCritical:
@ -189,11 +189,11 @@ func SetFlags() {
} }
if dumpHeaders { if dumpHeaders {
fs.Config.Dump |= fs.DumpHeaders ci.Dump |= fs.DumpHeaders
fs.Logf(nil, "--dump-headers is obsolete - please use --dump headers instead") fs.Logf(nil, "--dump-headers is obsolete - please use --dump headers instead")
} }
if dumpBodies { if dumpBodies {
fs.Config.Dump |= fs.DumpBodies ci.Dump |= fs.DumpBodies
fs.Logf(nil, "--dump-bodies is obsolete - please use --dump bodies instead") fs.Logf(nil, "--dump-bodies is obsolete - please use --dump bodies instead")
} }
@ -202,26 +202,26 @@ func SetFlags() {
deleteDuring && deleteAfter: deleteDuring && deleteAfter:
log.Fatalf(`Only one of --delete-before, --delete-during or --delete-after can be used.`) log.Fatalf(`Only one of --delete-before, --delete-during or --delete-after can be used.`)
case deleteBefore: case deleteBefore:
fs.Config.DeleteMode = fs.DeleteModeBefore ci.DeleteMode = fs.DeleteModeBefore
case deleteDuring: case deleteDuring:
fs.Config.DeleteMode = fs.DeleteModeDuring ci.DeleteMode = fs.DeleteModeDuring
case deleteAfter: case deleteAfter:
fs.Config.DeleteMode = fs.DeleteModeAfter ci.DeleteMode = fs.DeleteModeAfter
default: default:
fs.Config.DeleteMode = fs.DeleteModeDefault ci.DeleteMode = fs.DeleteModeDefault
} }
if fs.Config.CompareDest != "" && fs.Config.CopyDest != "" { if ci.CompareDest != "" && ci.CopyDest != "" {
log.Fatalf(`Can't use --compare-dest with --copy-dest.`) log.Fatalf(`Can't use --compare-dest with --copy-dest.`)
} }
switch { switch {
case len(fs.Config.StatsOneLineDateFormat) > 0: case len(ci.StatsOneLineDateFormat) > 0:
fs.Config.StatsOneLineDate = true ci.StatsOneLineDate = true
fs.Config.StatsOneLine = true ci.StatsOneLine = true
case fs.Config.StatsOneLineDate: case ci.StatsOneLineDate:
fs.Config.StatsOneLineDateFormat = "2006/01/02 15:04:05 - " ci.StatsOneLineDateFormat = "2006/01/02 15:04:05 - "
fs.Config.StatsOneLine = true ci.StatsOneLine = true
} }
if bindAddr != "" { if bindAddr != "" {
@ -232,24 +232,24 @@ func SetFlags() {
if len(addrs) != 1 { if len(addrs) != 1 {
log.Fatalf("--bind: Expecting 1 IP address for %q but got %d", bindAddr, len(addrs)) log.Fatalf("--bind: Expecting 1 IP address for %q but got %d", bindAddr, len(addrs))
} }
fs.Config.BindAddr = addrs[0] ci.BindAddr = addrs[0]
} }
if disableFeatures != "" { if disableFeatures != "" {
if disableFeatures == "help" { if disableFeatures == "help" {
log.Fatalf("Possible backend features are: %s\n", strings.Join(new(fs.Features).List(), ", ")) log.Fatalf("Possible backend features are: %s\n", strings.Join(new(fs.Features).List(), ", "))
} }
fs.Config.DisableFeatures = strings.Split(disableFeatures, ",") ci.DisableFeatures = strings.Split(disableFeatures, ",")
} }
if len(uploadHeaders) != 0 { if len(uploadHeaders) != 0 {
fs.Config.UploadHeaders = ParseHeaders(uploadHeaders) ci.UploadHeaders = ParseHeaders(uploadHeaders)
} }
if len(downloadHeaders) != 0 { if len(downloadHeaders) != 0 {
fs.Config.DownloadHeaders = ParseHeaders(downloadHeaders) ci.DownloadHeaders = ParseHeaders(downloadHeaders)
} }
if len(headers) != 0 { if len(headers) != 0 {
fs.Config.Headers = ParseHeaders(headers) ci.Headers = ParseHeaders(headers)
} }
// Make the config file absolute // Make the config file absolute
@ -260,6 +260,6 @@ func SetFlags() {
// Set whether multi-thread-streams was set // Set whether multi-thread-streams was set
multiThreadStreamsFlag := pflag.Lookup("multi-thread-streams") multiThreadStreamsFlag := pflag.Lookup("multi-thread-streams")
fs.Config.MultiThreadSet = multiThreadStreamsFlag != nil && multiThreadStreamsFlag.Changed ci.MultiThreadSet = multiThreadStreamsFlag != nil && multiThreadStreamsFlag.Changed
} }
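With the registrations above now writing into a ConfigInfo instead of the global, callers obtain the mutable config and thread it through explicitly. A minimal sketch of that wiring, assuming AddFlags takes the *fs.ConfigInfo (consistent with the registrations above, though its signature is not shown here) and using SetFlags as declared above:

```go
package main

import (
	"context"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configflags"
	"github.com/spf13/pflag"
)

func main() {
	// Attach a mutable copy of the global defaults to the context.
	ctx, ci := fs.AddConfig(context.Background())

	// Register the flags against this ConfigInfo rather than a global.
	// AddFlags taking ci is an assumption for this sketch.
	configflags.AddFlags(ci, pflag.CommandLine)
	pflag.Parse()

	// Apply the settings which can't be set directly (log level,
	// delete mode, headers, ...).
	configflags.SetFlags(ci)

	// Everything downstream now reads the parsed values via the context.
	_ = fs.GetConfig(ctx)
}
```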

fs/config_test.go Normal file
View File

@ -0,0 +1,29 @@
package fs
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
)
func TestGetConfig(t *testing.T) {
ctx := context.Background()
// Check nil
config := GetConfig(nil)
assert.Equal(t, globalConfig, config)
// Check empty config
config = GetConfig(ctx)
assert.Equal(t, globalConfig, config)
// Check adding a config
ctx2, config2 := AddConfig(ctx)
config2.Transfers++
assert.NotEqual(t, config2, config)
// Check can get config back
config2ctx := GetConfig(ctx2)
assert.Equal(t, config2, config2ctx)
}
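The test above pins down the intended semantics: GetConfig falls back to the global defaults for a nil or plain context, while AddConfig returns a new context carrying a private mutable copy. A short usage sketch (the Transfers field is just the one the test happens to touch):

```go
package main

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/fs"
)

func main() {
	ctx := context.Background()

	// No config attached yet, so this returns the global defaults.
	fmt.Println("default transfers:", fs.GetConfig(ctx).Transfers)

	// AddConfig attaches a mutable copy to a derived context.
	ctx2, ci2 := fs.AddConfig(ctx)
	ci2.Transfers = 16 // visible only through ctx2

	fmt.Println("scoped transfers:", fs.GetConfig(ctx2).Transfers) // 16
	fmt.Println("global unchanged:", fs.GetConfig(ctx).Transfers)
}
```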

View File

@ -229,7 +229,7 @@ func NewFilter(opt *Opt) (f *Filter, err error) {
return nil, err return nil, err
} }
} }
if fs.Config.Dump&fs.DumpFilters != 0 { if fs.GetConfig(context.Background()).Dump&fs.DumpFilters != 0 {
fmt.Println("--- start filters ---") fmt.Println("--- start filters ---")
fmt.Println(f.DumpFilters()) fmt.Println(f.DumpFilters())
fmt.Println("--- end filters ---") fmt.Println("--- end filters ---")
@ -540,14 +540,16 @@ var errFilesFromNotSet = errors.New("--files-from not set so can't use Filter.Li
// MakeListR makes a function to return all the files set using --files-from // MakeListR makes a function to return all the files set using --files-from
func (f *Filter) MakeListR(ctx context.Context, NewObject func(ctx context.Context, remote string) (fs.Object, error)) fs.ListRFn { func (f *Filter) MakeListR(ctx context.Context, NewObject func(ctx context.Context, remote string) (fs.Object, error)) fs.ListRFn {
return func(ctx context.Context, dir string, callback fs.ListRCallback) error { return func(ctx context.Context, dir string, callback fs.ListRCallback) error {
ci := fs.GetConfig(ctx)
if !f.HaveFilesFrom() { if !f.HaveFilesFrom() {
return errFilesFromNotSet return errFilesFromNotSet
} }
var ( var (
remotes = make(chan string, fs.Config.Checkers) checkers = ci.Checkers
g errgroup.Group remotes = make(chan string, checkers)
g errgroup.Group
) )
for i := 0; i < fs.Config.Checkers; i++ { for i := 0; i < checkers; i++ {
g.Go(func() (err error) { g.Go(func() (err error) {
var entries = make(fs.DirEntries, 1) var entries = make(fs.DirEntries, 1)
for remote := range remotes { for remote := range remotes {

View File

@ -774,7 +774,7 @@ func (ft *Features) Fill(ctx context.Context, f Fs) *Features {
if do, ok := f.(Commander); ok { if do, ok := f.(Commander); ok {
ft.Command = do.Command ft.Command = do.Command
} }
return ft.DisableList(Config.DisableFeatures) return ft.DisableList(GetConfig(ctx).DisableFeatures)
} }
// Mask the Features with the Fs passed in // Mask the Features with the Fs passed in
@ -854,7 +854,7 @@ func (ft *Features) Mask(ctx context.Context, f Fs) *Features {
ft.Disconnect = nil ft.Disconnect = nil
} }
// Command is always local so we don't mask it // Command is always local so we don't mask it
return ft.DisableList(Config.DisableFeatures) return ft.DisableList(GetConfig(ctx).DisableFeatures)
} }
// Wrap makes a Copy of the features passed in, overriding the UnWrap/Wrap // Wrap makes a Copy of the features passed in, overriding the UnWrap/Wrap
@ -1399,7 +1399,7 @@ func FileExists(ctx context.Context, fs Fs, remote string) (bool, error) {
// GetModifyWindow calculates the maximum modify window between the given Fses // GetModifyWindow calculates the maximum modify window between the given Fses
// and the Config.ModifyWindow parameter. // and the Config.ModifyWindow parameter.
func GetModifyWindow(ctx context.Context, fss ...Info) time.Duration { func GetModifyWindow(ctx context.Context, fss ...Info) time.Duration {
window := Config.ModifyWindow window := GetConfig(ctx).ModifyWindow
for _, f := range fss { for _, f := range fss {
if f != nil { if f != nil {
precision := f.Precision() precision := f.Precision()
@ -1424,12 +1424,12 @@ type logCalculator struct {
} }
// NewPacer creates a Pacer for the given Fs and Calculator. // NewPacer creates a Pacer for the given Fs and Calculator.
func NewPacer(c pacer.Calculator) *Pacer { func NewPacer(ctx context.Context, c pacer.Calculator) *Pacer {
p := &Pacer{ p := &Pacer{
Pacer: pacer.New( Pacer: pacer.New(
pacer.InvokerOption(pacerInvoker), pacer.InvokerOption(pacerInvoker),
pacer.MaxConnectionsOption(Config.Checkers+Config.Transfers), pacer.MaxConnectionsOption(GetConfig(ctx).Checkers+GetConfig(ctx).Transfers),
pacer.RetriesOption(Config.LowLevelRetries), pacer.RetriesOption(GetConfig(ctx).LowLevelRetries),
pacer.CalculatorOption(c), pacer.CalculatorOption(c),
), ),
} }
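NewPacer is typical of the constructors that used to read the global at build time and now read it from the context at the call site. A minimal sketch of the new call, mirroring the pacer tests below (the sleep values are illustrative):

```go
package example

import (
	"context"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/pacer"
)

func pacedCall(ctx context.Context) error {
	// Connection limits and retry count come from fs.GetConfig(ctx)
	// when the pacer is constructed.
	p := fs.NewPacer(ctx, pacer.NewDefault(
		pacer.MinSleep(10*time.Millisecond),
		pacer.MaxSleep(2*time.Second),
	))
	return p.Call(func() (bool, error) {
		// One attempt of the underlying operation; return (retry, err).
		return false, nil
	})
}
```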

View File

@ -127,15 +127,15 @@ func (dp *dummyPaced) fn() (bool, error) {
} }
func TestPacerCall(t *testing.T) { func TestPacerCall(t *testing.T) {
expectedCalled := Config.LowLevelRetries ctx := context.Background()
config := GetConfig(ctx)
expectedCalled := config.LowLevelRetries
if expectedCalled == 0 { if expectedCalled == 0 {
ctx, config = AddConfig(ctx)
expectedCalled = 20 expectedCalled = 20
Config.LowLevelRetries = expectedCalled config.LowLevelRetries = expectedCalled
defer func() {
Config.LowLevelRetries = 0
}()
} }
p := NewPacer(pacer.NewDefault(pacer.MinSleep(1*time.Millisecond), pacer.MaxSleep(2*time.Millisecond))) p := NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(1*time.Millisecond), pacer.MaxSleep(2*time.Millisecond)))
dp := &dummyPaced{retry: true} dp := &dummyPaced{retry: true}
err := p.Call(dp.fn) err := p.Call(dp.fn)
@ -144,7 +144,7 @@ func TestPacerCall(t *testing.T) {
} }
func TestPacerCallNoRetry(t *testing.T) { func TestPacerCallNoRetry(t *testing.T) {
p := NewPacer(pacer.NewDefault(pacer.MinSleep(1*time.Millisecond), pacer.MaxSleep(2*time.Millisecond))) p := NewPacer(context.Background(), pacer.NewDefault(pacer.MinSleep(1*time.Millisecond), pacer.MaxSleep(2*time.Millisecond)))
dp := &dummyPaced{retry: true} dp := &dummyPaced{retry: true}
err := p.CallNoRetry(dp.fn) err := p.CallNoRetry(dp.fn)

View File

@ -34,14 +34,14 @@ var (
) )
// StartHTTPTokenBucket starts the token bucket if necessary // StartHTTPTokenBucket starts the token bucket if necessary
func StartHTTPTokenBucket() { func StartHTTPTokenBucket(ctx context.Context) {
if fs.Config.TPSLimit > 0 { if fs.GetConfig(ctx).TPSLimit > 0 {
tpsBurst := fs.Config.TPSLimitBurst tpsBurst := fs.GetConfig(ctx).TPSLimitBurst
if tpsBurst < 1 { if tpsBurst < 1 {
tpsBurst = 1 tpsBurst = 1
} }
tpsBucket = rate.NewLimiter(rate.Limit(fs.Config.TPSLimit), tpsBurst) tpsBucket = rate.NewLimiter(rate.Limit(fs.GetConfig(ctx).TPSLimit), tpsBurst)
fs.Infof(nil, "Starting HTTP transaction limiter: max %g transactions/s with burst %d", fs.Config.TPSLimit, tpsBurst) fs.Infof(nil, "Starting HTTP transaction limiter: max %g transactions/s with burst %d", fs.GetConfig(ctx).TPSLimit, tpsBurst)
} }
} }
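A sketch of driving the limiter from a per-context config rather than a global (the numbers are illustrative, and the enclosing fshttp package path is assumed):

```go
package example

import (
	"context"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/fshttp"
)

func enableTPSLimit() context.Context {
	// Scope a transactions-per-second limit to one context.
	ctx, ci := fs.AddConfig(context.Background())
	ci.TPSLimit = 10     // max transactions per second
	ci.TPSLimitBurst = 1 // clamped to at least 1 above anyway
	fshttp.StartHTTPTokenBucket(ctx)
	return ctx
}
```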

View File

@ -1,6 +1,7 @@
package fs package fs
import ( import (
"context"
"fmt" "fmt"
"log" "log"
@ -72,7 +73,7 @@ func (l *LogLevel) Type() string {
// LogPrint sends the text to the logger of level // LogPrint sends the text to the logger of level
var LogPrint = func(level LogLevel, text string) { var LogPrint = func(level LogLevel, text string) {
var prefix string var prefix string
if Config.LogSystemdSupport { if GetConfig(context.TODO()).LogSystemdSupport {
switch level { switch level {
case LogLevelDebug: case LogLevelDebug:
prefix = sysdjournald.DebugPrefix prefix = sysdjournald.DebugPrefix
@ -121,7 +122,7 @@ func (j LogValueItem) String() string {
func LogPrintf(level LogLevel, o interface{}, text string, args ...interface{}) { func LogPrintf(level LogLevel, o interface{}, text string, args ...interface{}) {
out := fmt.Sprintf(text, args...) out := fmt.Sprintf(text, args...)
if Config.UseJSONLog { if GetConfig(context.TODO()).UseJSONLog {
fields := logrus.Fields{} fields := logrus.Fields{}
if o != nil { if o != nil {
fields = logrus.Fields{ fields = logrus.Fields{
@ -158,7 +159,7 @@ func LogPrintf(level LogLevel, o interface{}, text string, args ...interface{})
// LogLevelPrintf writes logs at the given level // LogLevelPrintf writes logs at the given level
func LogLevelPrintf(level LogLevel, o interface{}, text string, args ...interface{}) { func LogLevelPrintf(level LogLevel, o interface{}, text string, args ...interface{}) {
if Config.LogLevel >= level { if GetConfig(context.TODO()).LogLevel >= level {
LogPrintf(level, o, text, args...) LogPrintf(level, o, text, args...)
} }
} }
@ -166,7 +167,7 @@ func LogLevelPrintf(level LogLevel, o interface{}, text string, args ...interface{}) {
// Errorf writes error log output for this Object or Fs. It // Errorf writes error log output for this Object or Fs. It
// should always be seen by the user. // should always be seen by the user.
func Errorf(o interface{}, text string, args ...interface{}) { func Errorf(o interface{}, text string, args ...interface{}) {
if Config.LogLevel >= LogLevelError { if GetConfig(context.TODO()).LogLevel >= LogLevelError {
LogPrintf(LogLevelError, o, text, args...) LogPrintf(LogLevelError, o, text, args...)
} }
} }
@ -177,7 +178,7 @@ func Errorf(o interface{}, text string, args ...interface{}) {
// important things the user should see. The user can filter these // important things the user should see. The user can filter these
// out with the -q flag. // out with the -q flag.
func Logf(o interface{}, text string, args ...interface{}) { func Logf(o interface{}, text string, args ...interface{}) {
if Config.LogLevel >= LogLevelNotice { if GetConfig(context.TODO()).LogLevel >= LogLevelNotice {
LogPrintf(LogLevelNotice, o, text, args...) LogPrintf(LogLevelNotice, o, text, args...)
} }
} }
@ -186,7 +187,7 @@ func Logf(o interface{}, text string, args ...interface{}) {
// level for logging transfers, deletions and things which should // level for logging transfers, deletions and things which should
// appear with the -v flag. // appear with the -v flag.
func Infof(o interface{}, text string, args ...interface{}) { func Infof(o interface{}, text string, args ...interface{}) {
if Config.LogLevel >= LogLevelInfo { if GetConfig(context.TODO()).LogLevel >= LogLevelInfo {
LogPrintf(LogLevelInfo, o, text, args...) LogPrintf(LogLevelInfo, o, text, args...)
} }
} }
@ -194,7 +195,7 @@ func Infof(o interface{}, text string, args ...interface{}) {
// Debugf writes debugging output for this Object or Fs. Use this for // Debugf writes debugging output for this Object or Fs. Use this for
// debug only. The user must specify -vv to see this. // debug only. The user must specify -vv to see this.
func Debugf(o interface{}, text string, args ...interface{}) { func Debugf(o interface{}, text string, args ...interface{}) {
if Config.LogLevel >= LogLevelDebug { if GetConfig(context.TODO()).LogLevel >= LogLevelDebug {
LogPrintf(LogLevelDebug, o, text, args...) LogPrintf(LogLevelDebug, o, text, args...)
} }
} }

View File

@ -2,6 +2,7 @@
package log package log
import ( import (
"context"
"io" "io"
"log" "log"
"os" "os"
@ -51,7 +52,7 @@ func fnName() string {
// //
// Any pointers in the exit function will be dereferenced // Any pointers in the exit function will be dereferenced
func Trace(o interface{}, format string, a ...interface{}) func(string, ...interface{}) { func Trace(o interface{}, format string, a ...interface{}) func(string, ...interface{}) {
if fs.Config.LogLevel < fs.LogLevelDebug { if fs.GetConfig(context.Background()).LogLevel < fs.LogLevelDebug {
return func(format string, a ...interface{}) {} return func(format string, a ...interface{}) {}
} }
name := fnName() name := fnName()
@ -76,7 +77,7 @@ func Trace(o interface{}, format string, a ...interface{}) func(string, ...inter
// Stack logs a stack trace of callers with the o and info passed in // Stack logs a stack trace of callers with the o and info passed in
func Stack(o interface{}, info string) { func Stack(o interface{}, info string) {
if fs.Config.LogLevel < fs.LogLevelDebug { if fs.GetConfig(context.Background()).LogLevel < fs.LogLevelDebug {
return return
} }
arr := [16 * 1024]byte{} arr := [16 * 1024]byte{}
@ -90,7 +91,7 @@ func Stack(o interface{}, info string) {
func InitLogging() { func InitLogging() {
flagsStr := "," + Opt.Format + "," flagsStr := "," + Opt.Format + ","
var flags int var flags int
if !fs.Config.LogSystemdSupport { if !fs.GetConfig(context.Background()).LogSystemdSupport {
if strings.Contains(flagsStr, ",date,") { if strings.Contains(flagsStr, ",date,") {
flags |= log.Ldate flags |= log.Ldate
} }

View File

@ -49,10 +49,11 @@ type Marcher interface {
} }
// init sets up a march over opt.Fsrc and opt.Fdst, calling back the callback for each match // init sets up a march over opt.Fsrc and opt.Fdst, calling back the callback for each match
func (m *March) init() { func (m *March) init(ctx context.Context) {
m.srcListDir = m.makeListDir(m.Fsrc, m.SrcIncludeAll) ci := fs.GetConfig(ctx)
m.srcListDir = m.makeListDir(ctx, m.Fsrc, m.SrcIncludeAll)
if !m.NoTraverse { if !m.NoTraverse {
m.dstListDir = m.makeListDir(m.Fdst, m.DstIncludeAll) m.dstListDir = m.makeListDir(ctx, m.Fdst, m.DstIncludeAll)
} }
// Now create the matching transform // Now create the matching transform
// ..normalise the UTF8 first // ..normalise the UTF8 first
@ -65,7 +66,7 @@ func (m *March) init() {
// | Yes | No | No | // | Yes | No | No |
// | No | Yes | Yes | // | No | Yes | Yes |
// | Yes | Yes | Yes | // | Yes | Yes | Yes |
if m.Fdst.Features().CaseInsensitive || fs.Config.IgnoreCaseSync { if m.Fdst.Features().CaseInsensitive || ci.IgnoreCaseSync {
m.transforms = append(m.transforms, strings.ToLower) m.transforms = append(m.transforms, strings.ToLower)
} }
} }
@ -75,9 +76,10 @@ type listDirFn func(dir string) (entries fs.DirEntries, err error)
// makeListDir constructs a listing function for the given fs // makeListDir constructs a listing function for the given fs
// and includeAll flags for marching through the file system. // and includeAll flags for marching through the file system.
func (m *March) makeListDir(f fs.Fs, includeAll bool) listDirFn { func (m *March) makeListDir(ctx context.Context, f fs.Fs, includeAll bool) listDirFn {
if !(fs.Config.UseListR && f.Features().ListR != nil) && // !--fast-list active and ci := fs.GetConfig(ctx)
!(fs.Config.NoTraverse && filter.Active.HaveFilesFrom()) { // !(--files-from and --no-traverse) if !(ci.UseListR && f.Features().ListR != nil) && // !--fast-list active and
!(ci.NoTraverse && filter.Active.HaveFilesFrom()) { // !(--files-from and --no-traverse)
return func(dir string) (entries fs.DirEntries, err error) { return func(dir string) (entries fs.DirEntries, err error) {
return list.DirSorted(m.Ctx, f, includeAll, dir) return list.DirSorted(m.Ctx, f, includeAll, dir)
} }
@ -95,7 +97,7 @@ func (m *March) makeListDir(f fs.Fs, includeAll bool) listDirFn {
mu.Lock() mu.Lock()
defer mu.Unlock() defer mu.Unlock()
if !started { if !started {
dirs, dirsErr = walk.NewDirTree(m.Ctx, f, m.Dir, includeAll, fs.Config.MaxDepth) dirs, dirsErr = walk.NewDirTree(m.Ctx, f, m.Dir, includeAll, ci.MaxDepth)
started = true started = true
} }
if dirsErr != nil { if dirsErr != nil {
@ -122,10 +124,11 @@ type listDirJob struct {
} }
// Run starts the matching process off // Run starts the matching process off
func (m *March) Run() error { func (m *March) Run(ctx context.Context) error {
m.init() ci := fs.GetConfig(ctx)
m.init(ctx)
srcDepth := fs.Config.MaxDepth srcDepth := ci.MaxDepth
if srcDepth < 0 { if srcDepth < 0 {
srcDepth = fs.MaxLevel srcDepth = fs.MaxLevel
} }
@ -141,8 +144,9 @@ func (m *March) Run() error {
// Start some directory listing go routines // Start some directory listing go routines
var wg sync.WaitGroup // sync closing of go routines var wg sync.WaitGroup // sync closing of go routines
var traversing sync.WaitGroup // running directory traversals var traversing sync.WaitGroup // running directory traversals
in := make(chan listDirJob, fs.Config.Checkers) checkers := ci.Checkers
for i := 0; i < fs.Config.Checkers; i++ { in := make(chan listDirJob, checkers)
for i := 0; i < checkers; i++ {
wg.Add(1) wg.Add(1)
go func() { go func() {
defer wg.Done() defer wg.Done()

View File

@ -203,7 +203,7 @@ func TestMarch(t *testing.T) {
DstIncludeAll: filter.Active.Opt.DeleteExcluded, DstIncludeAll: filter.Active.Opt.DeleteExcluded,
} }
mt.processError(m.Run()) mt.processError(m.Run(ctx))
mt.cancel() mt.cancel()
err := mt.currentError() err := mt.currentError()
require.NoError(t, err) require.NoError(t, err)
@ -270,7 +270,7 @@ func TestMarchNoTraverse(t *testing.T) {
DstIncludeAll: filter.Active.Opt.DeleteExcluded, DstIncludeAll: filter.Active.Opt.DeleteExcluded,
} }
mt.processError(m.Run()) mt.processError(m.Run(ctx))
mt.cancel() mt.cancel()
err := mt.currentError() err := mt.currentError()
require.NoError(t, err) require.NoError(t, err)

View File

@ -114,16 +114,17 @@ func (c *checkMarch) SrcOnly(src fs.DirEntry) (recurse bool) {
// check to see if two objects are identical using the check function // check to see if two objects are identical using the check function
func (c *checkMarch) checkIdentical(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) { func (c *checkMarch) checkIdentical(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) {
ci := fs.GetConfig(ctx)
tr := accounting.Stats(ctx).NewCheckingTransfer(src) tr := accounting.Stats(ctx).NewCheckingTransfer(src)
defer func() { defer func() {
tr.Done(ctx, err) tr.Done(ctx, err)
}() }()
if sizeDiffers(src, dst) { if sizeDiffers(ctx, src, dst) {
err = errors.Errorf("Sizes differ") err = errors.Errorf("Sizes differ")
fs.Errorf(src, "%v", err) fs.Errorf(src, "%v", err)
return true, false, nil return true, false, nil
} }
if fs.Config.SizeOnly { if ci.SizeOnly {
return false, false, nil return false, false, nil
} }
return c.opt.Check(ctx, dst, src) return c.opt.Check(ctx, dst, src)
@ -202,11 +203,12 @@ func (c *checkMarch) Match(ctx context.Context, dst, src fs.DirEntry) (recurse b
// it returns true if differences were found // it returns true if differences were found
// it also returns whether it couldn't be hashed // it also returns whether it couldn't be hashed
func CheckFn(ctx context.Context, opt *CheckOpt) error { func CheckFn(ctx context.Context, opt *CheckOpt) error {
ci := fs.GetConfig(ctx)
if opt.Check == nil { if opt.Check == nil {
return errors.New("internal error: nil check function") return errors.New("internal error: nil check function")
} }
c := &checkMarch{ c := &checkMarch{
tokens: make(chan struct{}, fs.Config.Checkers), tokens: make(chan struct{}, ci.Checkers),
opt: *opt, opt: *opt,
} }
@ -219,7 +221,7 @@ func CheckFn(ctx context.Context, opt *CheckOpt) error {
Callback: c, Callback: c,
} }
fs.Debugf(c.opt.Fdst, "Waiting for checks to finish") fs.Debugf(c.opt.Fdst, "Waiting for checks to finish")
err := m.Run() err := m.Run(ctx)
c.wg.Wait() // wait for background go-routines c.wg.Wait() // wait for background go-routines
if c.dstFilesMissing > 0 { if c.dstFilesMissing > 0 {
@ -308,7 +310,8 @@ func CheckEqualReaders(in1, in2 io.Reader) (differ bool, err error) {
// //
// it returns true if differences were found // it returns true if differences were found
func CheckIdenticalDownload(ctx context.Context, dst, src fs.Object) (differ bool, err error) { func CheckIdenticalDownload(ctx context.Context, dst, src fs.Object) (differ bool, err error) {
err = Retry(src, fs.Config.LowLevelRetries, func() error { ci := fs.GetConfig(ctx)
err = Retry(src, ci.LowLevelRetries, func() error {
differ, err = checkIdenticalDownload(ctx, dst, src) differ, err = checkIdenticalDownload(ctx, dst, src)
return err return err
}) })

View File

@ -24,6 +24,8 @@ import (
func testCheck(t *testing.T, checkFunction func(ctx context.Context, opt *operations.CheckOpt) error) { func testCheck(t *testing.T, checkFunction func(ctx context.Context, opt *operations.CheckOpt) error) {
r := fstest.NewRun(t) r := fstest.NewRun(t)
defer r.Finalise() defer r.Finalise()
ctx := context.Background()
ci := fs.GetConfig(ctx)
addBuffers := func(opt *operations.CheckOpt) { addBuffers := func(opt *operations.CheckOpt) {
opt.Combined = new(bytes.Buffer) opt.Combined = new(bytes.Buffer)
@ -73,7 +75,7 @@ func testCheck(t *testing.T, checkFunction func(ctx context.Context, opt *operat
OneWay: oneway, OneWay: oneway,
} }
addBuffers(&opt) addBuffers(&opt)
err := checkFunction(context.Background(), &opt) err := checkFunction(ctx, &opt)
gotErrors := accounting.GlobalStats().GetErrors() gotErrors := accounting.GlobalStats().GetErrors()
gotChecks := accounting.GlobalStats().GetChecks() gotChecks := accounting.GlobalStats().GetChecks()
if wantErrors == 0 && err != nil { if wantErrors == 0 && err != nil {
@ -95,7 +97,7 @@ func testCheck(t *testing.T, checkFunction func(ctx context.Context, opt *operat
}) })
} }
file1 := r.WriteBoth(context.Background(), "rutabaga", "is tasty", t3) file1 := r.WriteBoth(ctx, "rutabaga", "is tasty", t3)
fstest.CheckItems(t, r.Fremote, file1) fstest.CheckItems(t, r.Fremote, file1)
fstest.CheckItems(t, r.Flocal, file1) fstest.CheckItems(t, r.Flocal, file1)
check(1, 0, 1, false, map[string]string{ check(1, 0, 1, false, map[string]string{
@ -118,7 +120,7 @@ func testCheck(t *testing.T, checkFunction func(ctx context.Context, opt *operat
"error": "", "error": "",
}) })
file3 := r.WriteObject(context.Background(), "empty space", "-", t2) file3 := r.WriteObject(ctx, "empty space", "-", t2)
fstest.CheckItems(t, r.Fremote, file1, file3) fstest.CheckItems(t, r.Fremote, file1, file3)
check(3, 2, 1, false, map[string]string{ check(3, 2, 1, false, map[string]string{
"combined": "- empty space\n+ potato2\n= rutabaga\n", "combined": "- empty space\n+ potato2\n= rutabaga\n",
@ -130,10 +132,10 @@ func testCheck(t *testing.T, checkFunction func(ctx context.Context, opt *operat
}) })
file2r := file2 file2r := file2
if fs.Config.SizeOnly { if ci.SizeOnly {
file2r = r.WriteObject(context.Background(), "potato2", "--Some-Differences-But-Size-Only-Is-Enabled-----------------", t1) file2r = r.WriteObject(ctx, "potato2", "--Some-Differences-But-Size-Only-Is-Enabled-----------------", t1)
} else { } else {
r.WriteObject(context.Background(), "potato2", "------------------------------------------------------------", t1) r.WriteObject(ctx, "potato2", "------------------------------------------------------------", t1)
} }
fstest.CheckItems(t, r.Fremote, file1, file2r, file3) fstest.CheckItems(t, r.Fremote, file1, file2r, file3)
check(4, 1, 2, false, map[string]string{ check(4, 1, 2, false, map[string]string{
@ -157,7 +159,7 @@ func testCheck(t *testing.T, checkFunction func(ctx context.Context, opt *operat
"error": "", "error": "",
}) })
file4 := r.WriteObject(context.Background(), "remotepotato", "------------------------------------------------------------", t1) file4 := r.WriteObject(ctx, "remotepotato", "------------------------------------------------------------", t1)
fstest.CheckItems(t, r.Fremote, file1, file2r, file3r, file4) fstest.CheckItems(t, r.Fremote, file1, file2r, file3r, file4)
check(6, 2, 3, false, map[string]string{ check(6, 2, 3, false, map[string]string{
"combined": "* empty space\n= potato2\n= rutabaga\n- remotepotato\n", "combined": "* empty space\n= potato2\n= rutabaga\n- remotepotato\n",
@ -182,11 +184,12 @@ func TestCheck(t *testing.T) {
} }
func TestCheckFsError(t *testing.T) { func TestCheckFsError(t *testing.T) {
dstFs, err := fs.NewFs(context.Background(), "non-existent") ctx := context.Background()
dstFs, err := fs.NewFs(ctx, "non-existent")
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
srcFs, err := fs.NewFs(context.Background(), "non-existent") srcFs, err := fs.NewFs(ctx, "non-existent")
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -195,7 +198,7 @@ func TestCheckFsError(t *testing.T) {
Fsrc: srcFs, Fsrc: srcFs,
OneWay: false, OneWay: false,
} }
err = operations.Check(context.Background(), &opt) err = operations.Check(ctx, &opt)
require.Error(t, err) require.Error(t, err)
} }
@ -204,8 +207,10 @@ func TestCheckDownload(t *testing.T) {
} }
func TestCheckSizeOnly(t *testing.T) { func TestCheckSizeOnly(t *testing.T) {
fs.Config.SizeOnly = true ctx := context.Background()
defer func() { fs.Config.SizeOnly = false }() ci := fs.GetConfig(ctx)
ci.SizeOnly = true
defer func() { ci.SizeOnly = false }()
TestCheck(t) TestCheck(t)
} }

View File

@ -75,6 +75,8 @@ func dedupeDeleteAllButOne(ctx context.Context, keep int, remote string, objs []
// dedupeDeleteIdentical deletes all but one of identical (by hash) copies // dedupeDeleteIdentical deletes all but one of identical (by hash) copies
func dedupeDeleteIdentical(ctx context.Context, ht hash.Type, remote string, objs []fs.Object) (remainingObjs []fs.Object) { func dedupeDeleteIdentical(ctx context.Context, ht hash.Type, remote string, objs []fs.Object) (remainingObjs []fs.Object) {
ci := fs.GetConfig(ctx)
// Make map of IDs // Make map of IDs
IDs := make(map[string]int, len(objs)) IDs := make(map[string]int, len(objs))
for _, o := range objs { for _, o := range objs {
@ -104,7 +106,7 @@ func dedupeDeleteIdentical(ctx context.Context, ht hash.Type, remote string, obj
dupesByID := make(map[string][]fs.Object, len(objs)) dupesByID := make(map[string][]fs.Object, len(objs))
for _, o := range objs { for _, o := range objs {
ID := "" ID := ""
if fs.Config.SizeOnly && o.Size() >= 0 { if ci.SizeOnly && o.Size() >= 0 {
ID = fmt.Sprintf("size %d", o.Size()) ID = fmt.Sprintf("size %d", o.Size())
} else if ht != hash.None { } else if ht != hash.None {
hashValue, err := o.Hash(ctx, ht) hashValue, err := o.Hash(ctx, ht)
@ -229,8 +231,9 @@ func (x *DeduplicateMode) Type() string {
// dedupeFindDuplicateDirs scans f for duplicate directories // dedupeFindDuplicateDirs scans f for duplicate directories
func dedupeFindDuplicateDirs(ctx context.Context, f fs.Fs) ([][]fs.Directory, error) { func dedupeFindDuplicateDirs(ctx context.Context, f fs.Fs) ([][]fs.Directory, error) {
ci := fs.GetConfig(ctx)
dirs := map[string][]fs.Directory{} dirs := map[string][]fs.Directory{}
err := walk.ListR(ctx, f, "", true, fs.Config.MaxDepth, walk.ListDirs, func(entries fs.DirEntries) error { err := walk.ListR(ctx, f, "", true, ci.MaxDepth, walk.ListDirs, func(entries fs.DirEntries) error {
entries.ForDir(func(d fs.Directory) { entries.ForDir(func(d fs.Directory) {
dirs[d.Remote()] = append(dirs[d.Remote()], d) dirs[d.Remote()] = append(dirs[d.Remote()], d)
}) })
@ -297,6 +300,7 @@ func sortSmallestFirst(objs []fs.Object) {
// Google Drive which can have duplicate file names. // Google Drive which can have duplicate file names.
func Deduplicate(ctx context.Context, f fs.Fs, mode DeduplicateMode) error { func Deduplicate(ctx context.Context, f fs.Fs, mode DeduplicateMode) error {
fs.Infof(f, "Looking for duplicates using %v mode.", mode) fs.Infof(f, "Looking for duplicates using %v mode.", mode)
ci := fs.GetConfig(ctx)
// Find duplicate directories first and fix them // Find duplicate directories first and fix them
duplicateDirs, err := dedupeFindDuplicateDirs(ctx, f) duplicateDirs, err := dedupeFindDuplicateDirs(ctx, f)
@ -315,7 +319,7 @@ func Deduplicate(ctx context.Context, f fs.Fs, mode DeduplicateMode) error {
// Now find duplicate files // Now find duplicate files
files := map[string][]fs.Object{} files := map[string][]fs.Object{}
err = walk.ListR(ctx, f, "", true, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error { err = walk.ListR(ctx, f, "", true, ci.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
entries.ForObject(func(o fs.Object) { entries.ForObject(func(o fs.Object) {
remote := o.Remote() remote := o.Remote()
files[remote] = append(files[remote], o) files[remote] = append(files[remote], o)

View File

@ -79,15 +79,17 @@ func TestDeduplicateSizeOnly(t *testing.T) {
r := fstest.NewRun(t) r := fstest.NewRun(t)
defer r.Finalise() defer r.Finalise()
skipIfCantDedupe(t, r.Fremote) skipIfCantDedupe(t, r.Fremote)
ctx := context.Background()
ci := fs.GetConfig(ctx)
file1 := r.WriteUncheckedObject(context.Background(), "one", "This is one", t1) file1 := r.WriteUncheckedObject(context.Background(), "one", "This is one", t1)
file2 := r.WriteUncheckedObject(context.Background(), "one", "THIS IS ONE", t1) file2 := r.WriteUncheckedObject(context.Background(), "one", "THIS IS ONE", t1)
file3 := r.WriteUncheckedObject(context.Background(), "one", "This is another one", t1) file3 := r.WriteUncheckedObject(context.Background(), "one", "This is another one", t1)
r.CheckWithDuplicates(t, file1, file2, file3) r.CheckWithDuplicates(t, file1, file2, file3)
fs.Config.SizeOnly = true ci.SizeOnly = true
defer func() { defer func() {
fs.Config.SizeOnly = false ci.SizeOnly = false
}() }()
err := operations.Deduplicate(context.Background(), r.Fremote, operations.DeduplicateSkip) err := operations.Deduplicate(context.Background(), r.Fremote, operations.DeduplicateSkip)

View File

@ -115,7 +115,7 @@ func ListJSON(ctx context.Context, fsrc fs.Fs, remote string, opt *ListJSONOpt,
hashTypes = append(hashTypes, ht) hashTypes = append(hashTypes, ht)
} }
} }
err := walk.ListR(ctx, fsrc, remote, false, ConfigMaxDepth(opt.Recurse), walk.ListAll, func(entries fs.DirEntries) (err error) { err := walk.ListR(ctx, fsrc, remote, false, ConfigMaxDepth(ctx, opt.Recurse), walk.ListAll, func(entries fs.DirEntries) (err error) {
for _, entry := range entries { for _, entry := range entries {
switch entry.(type) { switch entry.(type) {
case fs.Directory: case fs.Directory:

View File

@ -18,15 +18,17 @@ const (
// Return a boolean as to whether we should use multi thread copy for // Return a boolean as to whether we should use multi thread copy for
// this transfer // this transfer
func doMultiThreadCopy(f fs.Fs, src fs.Object) bool { func doMultiThreadCopy(ctx context.Context, f fs.Fs, src fs.Object) bool {
ci := fs.GetConfig(ctx)
// Disable multi thread if... // Disable multi thread if...
// ...it isn't configured // ...it isn't configured
if fs.Config.MultiThreadStreams <= 1 { if ci.MultiThreadStreams <= 1 {
return false return false
} }
// ...size of object is less than cutoff // ...size of object is less than cutoff
if src.Size() < int64(fs.Config.MultiThreadCutoff) { if src.Size() < int64(ci.MultiThreadCutoff) {
return false return false
} }
// ...source doesn't support it // ...source doesn't support it
@ -36,7 +38,7 @@ func doMultiThreadCopy(f fs.Fs, src fs.Object) bool {
} }
// ...if --multi-thread-streams not in use and source and // ...if --multi-thread-streams not in use and source and
// destination are both local // destination are both local
if !fs.Config.MultiThreadSet && dstFeatures.IsLocal && src.Fs().Features().IsLocal { if !ci.MultiThreadSet && dstFeatures.IsLocal && src.Fs().Features().IsLocal {
return false return false
} }
return true return true
@ -55,6 +57,7 @@ type multiThreadCopyState struct {
// Copy a single stream into place // Copy a single stream into place
func (mc *multiThreadCopyState) copyStream(ctx context.Context, stream int) (err error) { func (mc *multiThreadCopyState) copyStream(ctx context.Context, stream int) (err error) {
ci := fs.GetConfig(ctx)
defer func() { defer func() {
if err != nil { if err != nil {
fs.Debugf(mc.src, "multi-thread copy: stream %d/%d failed: %v", stream+1, mc.streams, err) fs.Debugf(mc.src, "multi-thread copy: stream %d/%d failed: %v", stream+1, mc.streams, err)
@ -71,7 +74,7 @@ func (mc *multiThreadCopyState) copyStream(ctx context.Context, stream int) (err
fs.Debugf(mc.src, "multi-thread copy: stream %d/%d (%d-%d) size %v starting", stream+1, mc.streams, start, end, fs.SizeSuffix(end-start)) fs.Debugf(mc.src, "multi-thread copy: stream %d/%d (%d-%d) size %v starting", stream+1, mc.streams, start, end, fs.SizeSuffix(end-start))
rc, err := NewReOpen(ctx, mc.src, fs.Config.LowLevelRetries, &fs.RangeOption{Start: start, End: end - 1}) rc, err := NewReOpen(ctx, mc.src, ci.LowLevelRetries, &fs.RangeOption{Start: start, End: end - 1})
if err != nil { if err != nil {
return errors.Wrap(err, "multipart copy: failed to open source") return errors.Wrap(err, "multipart copy: failed to open source")
} }

View File

@ -17,64 +17,66 @@ import (
) )
func TestDoMultiThreadCopy(t *testing.T) { func TestDoMultiThreadCopy(t *testing.T) {
f := mockfs.NewFs(context.Background(), "potato", "") ctx := context.Background()
ci := fs.GetConfig(ctx)
f := mockfs.NewFs(ctx, "potato", "")
src := mockobject.New("file.txt").WithContent([]byte(random.String(100)), mockobject.SeekModeNone) src := mockobject.New("file.txt").WithContent([]byte(random.String(100)), mockobject.SeekModeNone)
srcFs := mockfs.NewFs(context.Background(), "sausage", "") srcFs := mockfs.NewFs(ctx, "sausage", "")
src.SetFs(srcFs) src.SetFs(srcFs)
oldStreams := fs.Config.MultiThreadStreams oldStreams := ci.MultiThreadStreams
oldCutoff := fs.Config.MultiThreadCutoff oldCutoff := ci.MultiThreadCutoff
oldIsSet := fs.Config.MultiThreadSet oldIsSet := ci.MultiThreadSet
defer func() { defer func() {
fs.Config.MultiThreadStreams = oldStreams ci.MultiThreadStreams = oldStreams
fs.Config.MultiThreadCutoff = oldCutoff ci.MultiThreadCutoff = oldCutoff
fs.Config.MultiThreadSet = oldIsSet ci.MultiThreadSet = oldIsSet
}() }()
fs.Config.MultiThreadStreams, fs.Config.MultiThreadCutoff = 4, 50 ci.MultiThreadStreams, ci.MultiThreadCutoff = 4, 50
fs.Config.MultiThreadSet = false ci.MultiThreadSet = false
nullWriterAt := func(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) { nullWriterAt := func(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
panic("don't call me") panic("don't call me")
} }
f.Features().OpenWriterAt = nullWriterAt f.Features().OpenWriterAt = nullWriterAt
assert.True(t, doMultiThreadCopy(f, src)) assert.True(t, doMultiThreadCopy(ctx, f, src))
fs.Config.MultiThreadStreams = 0 ci.MultiThreadStreams = 0
assert.False(t, doMultiThreadCopy(f, src)) assert.False(t, doMultiThreadCopy(ctx, f, src))
fs.Config.MultiThreadStreams = 1 ci.MultiThreadStreams = 1
assert.False(t, doMultiThreadCopy(f, src)) assert.False(t, doMultiThreadCopy(ctx, f, src))
fs.Config.MultiThreadStreams = 2 ci.MultiThreadStreams = 2
assert.True(t, doMultiThreadCopy(f, src)) assert.True(t, doMultiThreadCopy(ctx, f, src))
fs.Config.MultiThreadCutoff = 200 ci.MultiThreadCutoff = 200
assert.False(t, doMultiThreadCopy(f, src)) assert.False(t, doMultiThreadCopy(ctx, f, src))
fs.Config.MultiThreadCutoff = 101 ci.MultiThreadCutoff = 101
assert.False(t, doMultiThreadCopy(f, src)) assert.False(t, doMultiThreadCopy(ctx, f, src))
fs.Config.MultiThreadCutoff = 100 ci.MultiThreadCutoff = 100
assert.True(t, doMultiThreadCopy(f, src)) assert.True(t, doMultiThreadCopy(ctx, f, src))
f.Features().OpenWriterAt = nil f.Features().OpenWriterAt = nil
assert.False(t, doMultiThreadCopy(f, src)) assert.False(t, doMultiThreadCopy(ctx, f, src))
f.Features().OpenWriterAt = nullWriterAt f.Features().OpenWriterAt = nullWriterAt
assert.True(t, doMultiThreadCopy(f, src)) assert.True(t, doMultiThreadCopy(ctx, f, src))
f.Features().IsLocal = true f.Features().IsLocal = true
srcFs.Features().IsLocal = true srcFs.Features().IsLocal = true
assert.False(t, doMultiThreadCopy(f, src)) assert.False(t, doMultiThreadCopy(ctx, f, src))
fs.Config.MultiThreadSet = true ci.MultiThreadSet = true
assert.True(t, doMultiThreadCopy(f, src)) assert.True(t, doMultiThreadCopy(ctx, f, src))
fs.Config.MultiThreadSet = false ci.MultiThreadSet = false
assert.False(t, doMultiThreadCopy(f, src)) assert.False(t, doMultiThreadCopy(ctx, f, src))
srcFs.Features().IsLocal = false srcFs.Features().IsLocal = false
assert.True(t, doMultiThreadCopy(f, src)) assert.True(t, doMultiThreadCopy(ctx, f, src))
srcFs.Features().IsLocal = true srcFs.Features().IsLocal = true
assert.False(t, doMultiThreadCopy(f, src)) assert.False(t, doMultiThreadCopy(ctx, f, src))
f.Features().IsLocal = false f.Features().IsLocal = false
assert.True(t, doMultiThreadCopy(f, src)) assert.True(t, doMultiThreadCopy(ctx, f, src))
srcFs.Features().IsLocal = false srcFs.Features().IsLocal = false
assert.True(t, doMultiThreadCopy(f, src)) assert.True(t, doMultiThreadCopy(ctx, f, src))
} }
func TestMultithreadCalculateChunks(t *testing.T) { func TestMultithreadCalculateChunks(t *testing.T) {

View File

@ -119,13 +119,14 @@ func checkHashes(ctx context.Context, src fs.ObjectInfo, dst fs.Object, ht hash.
// Otherwise the file is considered to be not equal including if there // Otherwise the file is considered to be not equal including if there
// were errors reading info. // were errors reading info.
func Equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object) bool { func Equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object) bool {
return equal(ctx, src, dst, defaultEqualOpt()) return equal(ctx, src, dst, defaultEqualOpt(ctx))
} }
// sizeDiffers compares the size of src and dst taking into account the // sizeDiffers compares the size of src and dst taking into account the
// various ways of ignoring sizes // various ways of ignoring sizes
func sizeDiffers(src, dst fs.ObjectInfo) bool { func sizeDiffers(ctx context.Context, src, dst fs.ObjectInfo) bool {
if fs.Config.IgnoreSize || src.Size() < 0 || dst.Size() < 0 { ci := fs.GetConfig(ctx)
if ci.IgnoreSize || src.Size() < 0 || dst.Size() < 0 {
return false return false
} }
return src.Size() != dst.Size() return src.Size() != dst.Size()
@ -142,11 +143,12 @@ type equalOpt struct {
} }
// default set of options for equal() // default set of options for equal()
func defaultEqualOpt() equalOpt { func defaultEqualOpt(ctx context.Context) equalOpt {
ci := fs.GetConfig(ctx)
return equalOpt{ return equalOpt{
sizeOnly: fs.Config.SizeOnly, sizeOnly: ci.SizeOnly,
checkSum: fs.Config.CheckSum, checkSum: ci.CheckSum,
updateModTime: !fs.Config.NoUpdateModTime, updateModTime: !ci.NoUpdateModTime,
forceModTimeMatch: false, forceModTimeMatch: false,
} }
} }
@ -161,7 +163,8 @@ func logModTimeUpload(dst fs.Object) {
} }
func equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object, opt equalOpt) bool { func equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object, opt equalOpt) bool {
if sizeDiffers(src, dst) { ci := fs.GetConfig(ctx)
if sizeDiffers(ctx, src, dst) {
fs.Debugf(src, "Sizes differ (src %d vs dst %d)", src.Size(), dst.Size()) fs.Debugf(src, "Sizes differ (src %d vs dst %d)", src.Size(), dst.Size())
return false return false
} }
@ -218,7 +221,7 @@ func equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object, opt equalOpt)
fs.Debugf(src, "%v differ", ht) fs.Debugf(src, "%v differ", ht)
return false return false
} }
if ht == hash.None && !fs.Config.RefreshTimes { if ht == hash.None && !ci.RefreshTimes {
// if couldn't check hash, return that they differ // if couldn't check hash, return that they differ
return false return false
} }
@ -228,7 +231,7 @@ func equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object, opt equalOpt)
if !SkipDestructive(ctx, src, "update modification time") { if !SkipDestructive(ctx, src, "update modification time") {
// Size and hash the same but mtime different // Size and hash the same but mtime different
// Error if objects are treated as immutable // Error if objects are treated as immutable
if fs.Config.Immutable { if ci.Immutable {
fs.Errorf(dst, "StartedAt mismatch between immutable objects") fs.Errorf(dst, "StartedAt mismatch between immutable objects")
return false return false
} }
@ -243,7 +246,7 @@ func equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object, opt equalOpt)
fs.Infof(dst, "src and dst identical but can't set mod time without deleting and re-uploading") fs.Infof(dst, "src and dst identical but can't set mod time without deleting and re-uploading")
// Remove the file if BackupDir isn't set. If BackupDir is set we would rather have the old file // Remove the file if BackupDir isn't set. If BackupDir is set we would rather have the old file
// put in the BackupDir than deleted which is what will happen if we don't delete it. // put in the BackupDir than deleted which is what will happen if we don't delete it.
if fs.Config.BackupDir == "" { if ci.BackupDir == "" {
err = dst.Remove(ctx) err = dst.Remove(ctx)
if err != nil { if err != nil {
fs.Errorf(dst, "failed to delete before re-upload: %v", err) fs.Errorf(dst, "failed to delete before re-upload: %v", err)
@ -337,11 +340,12 @@ var _ fs.FullObjectInfo = (*OverrideRemote)(nil)
// CommonHash returns a single hash.Type and a HashOption with that // CommonHash returns a single hash.Type and a HashOption with that
// type which is in common between the two fs.Fs. // type which is in common between the two fs.Fs.
func CommonHash(fa, fb fs.Info) (hash.Type, *fs.HashesOption) { func CommonHash(ctx context.Context, fa, fb fs.Info) (hash.Type, *fs.HashesOption) {
ci := fs.GetConfig(ctx)
// work out which hash to use - limit to 1 hash in common // work out which hash to use - limit to 1 hash in common
var common hash.Set var common hash.Set
hashType := hash.None hashType := hash.None
if !fs.Config.IgnoreChecksum { if !ci.IgnoreChecksum {
common = fb.Hashes().Overlap(fa.Hashes()) common = fb.Hashes().Overlap(fa.Hashes())
if common.Count() > 0 { if common.Count() > 0 {
hashType = common.GetOne() hashType = common.GetOne()
@ -357,6 +361,7 @@ func CommonHash(fa, fb fs.Info) (hash.Type, *fs.HashesOption) {
// It returns the destination object if possible. Note that this may // It returns the destination object if possible. Note that this may
// be nil. // be nil.
func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Object, err error) { func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Object, err error) {
ci := fs.GetConfig(ctx)
tr := accounting.Stats(ctx).NewTransfer(src) tr := accounting.Stats(ctx).NewTransfer(src)
defer func() { defer func() {
tr.Done(ctx, err) tr.Done(ctx, err)
@ -365,25 +370,25 @@ func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Obj
if SkipDestructive(ctx, src, "copy") { if SkipDestructive(ctx, src, "copy") {
return newDst, nil return newDst, nil
} }
maxTries := fs.Config.LowLevelRetries maxTries := ci.LowLevelRetries
tries := 0 tries := 0
doUpdate := dst != nil doUpdate := dst != nil
hashType, hashOption := CommonHash(f, src.Fs()) hashType, hashOption := CommonHash(ctx, f, src.Fs())
var actionTaken string var actionTaken string
for { for {
// Try server-side copy first - if has optional interface and // Try server-side copy first - if has optional interface and
// is same underlying remote // is same underlying remote
actionTaken = "Copied (server-side copy)" actionTaken = "Copied (server-side copy)"
if fs.Config.MaxTransfer >= 0 { if ci.MaxTransfer >= 0 {
var bytesSoFar int64 var bytesSoFar int64
if fs.Config.CutoffMode == fs.CutoffModeCautious { if ci.CutoffMode == fs.CutoffModeCautious {
bytesSoFar = accounting.Stats(ctx).GetBytesWithPending() + src.Size() bytesSoFar = accounting.Stats(ctx).GetBytesWithPending() + src.Size()
} else { } else {
bytesSoFar = accounting.Stats(ctx).GetBytes() bytesSoFar = accounting.Stats(ctx).GetBytes()
} }
if bytesSoFar >= int64(fs.Config.MaxTransfer) { if bytesSoFar >= int64(ci.MaxTransfer) {
if fs.Config.CutoffMode == fs.CutoffModeHard { if ci.CutoffMode == fs.CutoffModeHard {
return nil, accounting.ErrorMaxTransferLimitReachedFatal return nil, accounting.ErrorMaxTransferLimitReachedFatal
} }
return nil, accounting.ErrorMaxTransferLimitReachedGraceful return nil, accounting.ErrorMaxTransferLimitReachedGraceful
@ -408,12 +413,12 @@ func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Obj
} }
// If can't server-side copy, do it manually // If can't server-side copy, do it manually
if err == fs.ErrorCantCopy { if err == fs.ErrorCantCopy {
if doMultiThreadCopy(f, src) { if doMultiThreadCopy(ctx, f, src) {
// Number of streams proportional to size // Number of streams proportional to size
streams := src.Size() / int64(fs.Config.MultiThreadCutoff) streams := src.Size() / int64(ci.MultiThreadCutoff)
// With maximum // With maximum
if streams > int64(fs.Config.MultiThreadStreams) { if streams > int64(ci.MultiThreadStreams) {
streams = int64(fs.Config.MultiThreadStreams) streams = int64(ci.MultiThreadStreams)
} }
if streams < 2 { if streams < 2 {
streams = 2 streams = 2
@ -427,10 +432,10 @@ func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Obj
} else { } else {
var in0 io.ReadCloser var in0 io.ReadCloser
options := []fs.OpenOption{hashOption} options := []fs.OpenOption{hashOption}
for _, option := range fs.Config.DownloadHeaders { for _, option := range ci.DownloadHeaders {
options = append(options, option) options = append(options, option)
} }
in0, err = NewReOpen(ctx, src, fs.Config.LowLevelRetries, options...) in0, err = NewReOpen(ctx, src, ci.LowLevelRetries, options...)
if err != nil { if err != nil {
err = errors.Wrap(err, "failed to open source object") err = errors.Wrap(err, "failed to open source object")
} else { } else {
@ -452,7 +457,7 @@ func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Obj
wrappedSrc = NewOverrideRemote(src, remote) wrappedSrc = NewOverrideRemote(src, remote)
} }
options := []fs.OpenOption{hashOption} options := []fs.OpenOption{hashOption}
for _, option := range fs.Config.UploadHeaders { for _, option := range ci.UploadHeaders {
options = append(options, option) options = append(options, option)
} }
if doUpdate { if doUpdate {
@ -491,7 +496,7 @@ func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Obj
} }
// Verify sizes are the same after transfer // Verify sizes are the same after transfer
if sizeDiffers(src, dst) { if sizeDiffers(ctx, src, dst) {
err = errors.Errorf("corrupted on transfer: sizes differ %d vs %d", src.Size(), dst.Size()) err = errors.Errorf("corrupted on transfer: sizes differ %d vs %d", src.Size(), dst.Size())
fs.Errorf(dst, "%v", err) fs.Errorf(dst, "%v", err)
err = fs.CountError(err) err = fs.CountError(err)
@ -607,16 +612,17 @@ func CanServerSideMove(fdst fs.Fs) bool {
// SuffixName adds the current --suffix to the remote, obeying // SuffixName adds the current --suffix to the remote, obeying
// --suffix-keep-extension if set // --suffix-keep-extension if set
func SuffixName(remote string) string { func SuffixName(ctx context.Context, remote string) string {
if fs.Config.Suffix == "" { ci := fs.GetConfig(ctx)
if ci.Suffix == "" {
return remote return remote
} }
if fs.Config.SuffixKeepExtension { if ci.SuffixKeepExtension {
ext := path.Ext(remote) ext := path.Ext(remote)
base := remote[:len(remote)-len(ext)] base := remote[:len(remote)-len(ext)]
return base + fs.Config.Suffix + ext return base + ci.Suffix + ext
} }
return remote + fs.Config.Suffix return remote + ci.Suffix
} }
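As a worked example of the two branches above (the suffix value is illustrative; SuffixName lives in the operations package used throughout this commit):

```go
package main

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/operations"
)

func main() {
	ctx, ci := fs.AddConfig(context.Background())
	ci.Suffix = "-old"

	// Default: the suffix is appended after the extension.
	fmt.Println(operations.SuffixName(ctx, "a/b.txt")) // a/b.txt-old

	// With --suffix-keep-extension the extension stays last.
	ci.SuffixKeepExtension = true
	fmt.Println(operations.SuffixName(ctx, "a/b.txt")) // a/b-old.txt
}
```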
// DeleteFileWithBackupDir deletes a single file respecting --dry-run // DeleteFileWithBackupDir deletes a single file respecting --dry-run
@ -625,12 +631,13 @@ func SuffixName(remote string) string {
// If backupDir is set then it moves the file to there instead of // If backupDir is set then it moves the file to there instead of
// deleting // deleting
func DeleteFileWithBackupDir(ctx context.Context, dst fs.Object, backupDir fs.Fs) (err error) { func DeleteFileWithBackupDir(ctx context.Context, dst fs.Object, backupDir fs.Fs) (err error) {
ci := fs.GetConfig(ctx)
tr := accounting.Stats(ctx).NewCheckingTransfer(dst) tr := accounting.Stats(ctx).NewCheckingTransfer(dst)
defer func() { defer func() {
tr.Done(ctx, err) tr.Done(ctx, err)
}() }()
numDeletes := accounting.Stats(ctx).Deletes(1) numDeletes := accounting.Stats(ctx).Deletes(1)
if fs.Config.MaxDelete != -1 && numDeletes > fs.Config.MaxDelete { if ci.MaxDelete != -1 && numDeletes > ci.MaxDelete {
return fserrors.FatalError(errors.New("--max-delete threshold reached")) return fserrors.FatalError(errors.New("--max-delete threshold reached"))
} }
action, actioned := "delete", "Deleted" action, actioned := "delete", "Deleted"
@ -669,11 +676,12 @@ func DeleteFile(ctx context.Context, dst fs.Object) (err error) {
// instead of being deleted. // instead of being deleted.
func DeleteFilesWithBackupDir(ctx context.Context, toBeDeleted fs.ObjectsChan, backupDir fs.Fs) error { func DeleteFilesWithBackupDir(ctx context.Context, toBeDeleted fs.ObjectsChan, backupDir fs.Fs) error {
var wg sync.WaitGroup var wg sync.WaitGroup
wg.Add(fs.Config.Transfers) ci := fs.GetConfig(ctx)
wg.Add(ci.Transfers)
var errorCount int32 var errorCount int32
var fatalErrorCount int32 var fatalErrorCount int32
for i := 0; i < fs.Config.Transfers; i++ { for i := 0; i < ci.Transfers; i++ {
go func() { go func() {
defer wg.Done() defer wg.Done()
for dst := range toBeDeleted { for dst := range toBeDeleted {
@ -779,7 +787,8 @@ func Retry(o interface{}, maxTries int, fn func() error) (err error) {
// //
// Lists in parallel which may get them out of order // Lists in parallel which may get them out of order
func ListFn(ctx context.Context, f fs.Fs, fn func(fs.Object)) error { func ListFn(ctx context.Context, f fs.Fs, fn func(fs.Object)) error {
return walk.ListR(ctx, f, "", false, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error { ci := fs.GetConfig(ctx)
return walk.ListR(ctx, f, "", false, ci.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
entries.ForObject(fn) entries.ForObject(fn)
return nil return nil
}) })
@ -898,8 +907,9 @@ func Count(ctx context.Context, f fs.Fs) (objects int64, size int64, err error)
} }
// ConfigMaxDepth returns the depth to use for a recursive or non recursive listing. // ConfigMaxDepth returns the depth to use for a recursive or non recursive listing.
func ConfigMaxDepth(recursive bool) int { func ConfigMaxDepth(ctx context.Context, recursive bool) int {
depth := fs.Config.MaxDepth ci := fs.GetConfig(ctx)
depth := ci.MaxDepth
if !recursive && depth < 0 { if !recursive && depth < 0 {
depth = 1 depth = 1
} }
@ -908,7 +918,7 @@ func ConfigMaxDepth(recursive bool) int {
// ListDir lists the directories/buckets/containers in the Fs to the supplied writer // ListDir lists the directories/buckets/containers in the Fs to the supplied writer
func ListDir(ctx context.Context, f fs.Fs, w io.Writer) error { func ListDir(ctx context.Context, f fs.Fs, w io.Writer) error {
return walk.ListR(ctx, f, "", false, ConfigMaxDepth(false), walk.ListDirs, func(entries fs.DirEntries) error { return walk.ListR(ctx, f, "", false, ConfigMaxDepth(ctx, false), walk.ListDirs, func(entries fs.DirEntries) error {
entries.ForDir(func(dir fs.Directory) { entries.ForDir(func(dir fs.Directory) {
if dir != nil { if dir != nil {
syncFprintf(w, "%12d %13s %9d %s\n", dir.Size(), dir.ModTime(ctx).Local().Format("2006-01-02 15:04:05"), dir.Items(), dir.Remote()) syncFprintf(w, "%12d %13s %9d %s\n", dir.Size(), dir.ModTime(ctx).Local().Format("2006-01-02 15:04:05"), dir.Items(), dir.Remote())
@ -985,7 +995,8 @@ func Purge(ctx context.Context, f fs.Fs, dir string) (err error) {
// Delete removes all the contents of a container. Unlike Purge, it // Delete removes all the contents of a container. Unlike Purge, it
// obeys includes and excludes. // obeys includes and excludes.
func Delete(ctx context.Context, f fs.Fs) error { func Delete(ctx context.Context, f fs.Fs) error {
delChan := make(fs.ObjectsChan, fs.Config.Transfers) ci := fs.GetConfig(ctx)
delChan := make(fs.ObjectsChan, ci.Transfers)
delErr := make(chan error, 1) delErr := make(chan error, 1)
go func() { go func() {
delErr <- DeleteFiles(ctx, delChan) delErr <- DeleteFiles(ctx, delChan)
@ -1008,10 +1019,11 @@ func Delete(ctx context.Context, f fs.Fs) error {
// //
// If the error was ErrorDirNotFound then it will be ignored // If the error was ErrorDirNotFound then it will be ignored
func listToChan(ctx context.Context, f fs.Fs, dir string) fs.ObjectsChan { func listToChan(ctx context.Context, f fs.Fs, dir string) fs.ObjectsChan {
o := make(fs.ObjectsChan, fs.Config.Checkers) ci := fs.GetConfig(ctx)
o := make(fs.ObjectsChan, ci.Checkers)
go func() { go func() {
defer close(o) defer close(o)
err := walk.ListR(ctx, f, dir, true, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error { err := walk.ListR(ctx, f, dir, true, ci.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
entries.ForObject(func(obj fs.Object) { entries.ForObject(func(obj fs.Object) {
o <- obj o <- obj
}) })
@ -1054,6 +1066,7 @@ type readCloser struct {
// if count >= 0 then only that many characters will be output // if count >= 0 then only that many characters will be output
func Cat(ctx context.Context, f fs.Fs, w io.Writer, offset, count int64) error { func Cat(ctx context.Context, f fs.Fs, w io.Writer, offset, count int64) error {
var mu sync.Mutex var mu sync.Mutex
ci := fs.GetConfig(ctx)
return ListFn(ctx, f, func(o fs.Object) { return ListFn(ctx, f, func(o fs.Object) {
var err error var err error
tr := accounting.Stats(ctx).NewTransfer(o) tr := accounting.Stats(ctx).NewTransfer(o)
@ -1072,7 +1085,7 @@ func Cat(ctx context.Context, f fs.Fs, w io.Writer, offset, count int64) error {
if opt.Start > 0 || opt.End >= 0 { if opt.Start > 0 || opt.End >= 0 {
options = append(options, &opt) options = append(options, &opt)
} }
for _, option := range fs.Config.DownloadHeaders { for _, option := range ci.DownloadHeaders {
options = append(options, option) options = append(options, option)
} }
in, err := o.Open(ctx, options...) in, err := o.Open(ctx, options...)
@ -1098,6 +1111,7 @@ func Cat(ctx context.Context, f fs.Fs, w io.Writer, offset, count int64) error {
// Rcat reads data from the Reader until EOF and uploads it to a file on remote // Rcat reads data from the Reader until EOF and uploads it to a file on remote
func Rcat(ctx context.Context, fdst fs.Fs, dstFileName string, in io.ReadCloser, modTime time.Time) (dst fs.Object, err error) { func Rcat(ctx context.Context, fdst fs.Fs, dstFileName string, in io.ReadCloser, modTime time.Time) (dst fs.Object, err error) {
ci := fs.GetConfig(ctx)
tr := accounting.Stats(ctx).NewTransferRemoteSize(dstFileName, -1) tr := accounting.Stats(ctx).NewTransferRemoteSize(dstFileName, -1)
defer func() { defer func() {
tr.Done(ctx, err) tr.Done(ctx, err)
@ -1108,7 +1122,7 @@ func Rcat(ctx context.Context, fdst fs.Fs, dstFileName string, in io.ReadCloser,
var trackingIn io.Reader var trackingIn io.Reader
var hasher *hash.MultiHasher var hasher *hash.MultiHasher
var options []fs.OpenOption var options []fs.OpenOption
if !fs.Config.IgnoreChecksum { if !ci.IgnoreChecksum {
hashes := hash.NewHashSet(fdst.Hashes().GetOne()) // just pick one hash hashes := hash.NewHashSet(fdst.Hashes().GetOne()) // just pick one hash
hashOption := &fs.HashesOption{Hashes: hashes} hashOption := &fs.HashesOption{Hashes: hashes}
options = append(options, hashOption) options = append(options, hashOption)
@ -1120,7 +1134,7 @@ func Rcat(ctx context.Context, fdst fs.Fs, dstFileName string, in io.ReadCloser,
} else { } else {
trackingIn = readCounter trackingIn = readCounter
} }
for _, option := range fs.Config.UploadHeaders { for _, option := range ci.UploadHeaders {
options = append(options, option) options = append(options, option)
} }
@ -1140,7 +1154,7 @@ func Rcat(ctx context.Context, fdst fs.Fs, dstFileName string, in io.ReadCloser,
} }
// check if file small enough for direct upload // check if file small enough for direct upload
buf := make([]byte, fs.Config.StreamingUploadCutoff) buf := make([]byte, ci.StreamingUploadCutoff)
if n, err := io.ReadFull(trackingIn, buf); err == io.EOF || err == io.ErrUnexpectedEOF { if n, err := io.ReadFull(trackingIn, buf); err == io.EOF || err == io.ErrUnexpectedEOF {
fs.Debugf(fdst, "File to upload is small (%d bytes), uploading instead of streaming", n) fs.Debugf(fdst, "File to upload is small (%d bytes), uploading instead of streaming", n)
src := object.NewMemoryObject(dstFileName, modTime, buf[:n]) src := object.NewMemoryObject(dstFileName, modTime, buf[:n])
@ -1202,9 +1216,10 @@ func PublicLink(ctx context.Context, f fs.Fs, remote string, expire fs.Duration,
// Rmdirs removes any empty directories (or directories only // Rmdirs removes any empty directories (or directories only
// containing empty directories) under f, including f. // containing empty directories) under f, including f.
func Rmdirs(ctx context.Context, f fs.Fs, dir string, leaveRoot bool) error { func Rmdirs(ctx context.Context, f fs.Fs, dir string, leaveRoot bool) error {
ci := fs.GetConfig(ctx)
dirEmpty := make(map[string]bool) dirEmpty := make(map[string]bool)
dirEmpty[dir] = !leaveRoot dirEmpty[dir] = !leaveRoot
err := walk.Walk(ctx, f, dir, true, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error { err := walk.Walk(ctx, f, dir, true, ci.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
if err != nil { if err != nil {
err = fs.CountError(err) err = fs.CountError(err)
fs.Errorf(f, "Failed to list %q: %v", dirPath, err) fs.Errorf(f, "Failed to list %q: %v", dirPath, err)
@ -1263,9 +1278,10 @@ func Rmdirs(ctx context.Context, f fs.Fs, dir string, leaveRoot bool) error {
// GetCompareDest sets up --compare-dest // GetCompareDest sets up --compare-dest
func GetCompareDest(ctx context.Context) (CompareDest fs.Fs, err error) { func GetCompareDest(ctx context.Context) (CompareDest fs.Fs, err error) {
CompareDest, err = cache.Get(ctx, fs.Config.CompareDest) ci := fs.GetConfig(ctx)
CompareDest, err = cache.Get(ctx, ci.CompareDest)
if err != nil { if err != nil {
return nil, fserrors.FatalError(errors.Errorf("Failed to make fs for --compare-dest %q: %v", fs.Config.CompareDest, err)) return nil, fserrors.FatalError(errors.Errorf("Failed to make fs for --compare-dest %q: %v", ci.CompareDest, err))
} }
return CompareDest, nil return CompareDest, nil
} }
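Because the config rides on the context, a caller can scope --compare-dest to a single operation instead of mutating process-wide state. A hedged sketch, assuming fs.AddConfig returns a child context carrying a mutable copy of the config (the API this commit introduces); the remote path and helper name are illustrative:

func withSnapshotCompare(ctx context.Context) (fs.Fs, error) {
	// scope --compare-dest to this call only; other operations
	// in the process keep their own settings
	newCtx, ci := fs.AddConfig(ctx)
	ci.CompareDest = "remote:snapshots/current" // illustrative value
	return GetCompareDest(newCtx)
}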
@ -1299,9 +1315,10 @@ func compareDest(ctx context.Context, dst, src fs.Object, CompareDest fs.Fs) (No
// GetCopyDest sets up --copy-dest // GetCopyDest sets up --copy-dest
func GetCopyDest(ctx context.Context, fdst fs.Fs) (CopyDest fs.Fs, err error) { func GetCopyDest(ctx context.Context, fdst fs.Fs) (CopyDest fs.Fs, err error) {
CopyDest, err = cache.Get(ctx, fs.Config.CopyDest) ci := fs.GetConfig(ctx)
CopyDest, err = cache.Get(ctx, ci.CopyDest)
if err != nil { if err != nil {
return nil, fserrors.FatalError(errors.Errorf("Failed to make fs for --copy-dest %q: %v", fs.Config.CopyDest, err)) return nil, fserrors.FatalError(errors.Errorf("Failed to make fs for --copy-dest %q: %v", ci.CopyDest, err))
} }
if !SameConfig(fdst, CopyDest) { if !SameConfig(fdst, CopyDest) {
return nil, fserrors.FatalError(errors.New("parameter to --copy-dest has to be on the same remote as destination")) return nil, fserrors.FatalError(errors.New("parameter to --copy-dest has to be on the same remote as destination"))
@ -1332,7 +1349,7 @@ func copyDest(ctx context.Context, fdst fs.Fs, dst, src fs.Object, CopyDest, bac
default: default:
return false, err return false, err
} }
opt := defaultEqualOpt() opt := defaultEqualOpt(ctx)
opt.updateModTime = false opt.updateModTime = false
if equal(ctx, src, CopyDestFile, opt) { if equal(ctx, src, CopyDestFile, opt) {
if dst == nil || !Equal(ctx, src, dst) { if dst == nil || !Equal(ctx, src, dst) {
@ -1364,9 +1381,10 @@ func copyDest(ctx context.Context, fdst fs.Fs, dst, src fs.Object, CopyDest, bac
// //
// Returns True if src does not need to be copied // Returns True if src does not need to be copied
func CompareOrCopyDest(ctx context.Context, fdst fs.Fs, dst, src fs.Object, CompareOrCopyDest, backupDir fs.Fs) (NoNeedTransfer bool, err error) { func CompareOrCopyDest(ctx context.Context, fdst fs.Fs, dst, src fs.Object, CompareOrCopyDest, backupDir fs.Fs) (NoNeedTransfer bool, err error) {
if fs.Config.CompareDest != "" { ci := fs.GetConfig(ctx)
if ci.CompareDest != "" {
return compareDest(ctx, dst, src, CompareOrCopyDest) return compareDest(ctx, dst, src, CompareOrCopyDest)
} else if fs.Config.CopyDest != "" { } else if ci.CopyDest != "" {
return copyDest(ctx, fdst, dst, src, CompareOrCopyDest, backupDir) return copyDest(ctx, fdst, dst, src, CompareOrCopyDest, backupDir)
} }
return false, nil return false, nil
@ -1378,22 +1396,23 @@ func CompareOrCopyDest(ctx context.Context, fdst fs.Fs, dst, src fs.Object, Comp
// Returns a flag which indicates whether the file needs to be // Returns a flag which indicates whether the file needs to be
// transferred or not. // transferred or not.
func NeedTransfer(ctx context.Context, dst, src fs.Object) bool { func NeedTransfer(ctx context.Context, dst, src fs.Object) bool {
ci := fs.GetConfig(ctx)
if dst == nil { if dst == nil {
fs.Debugf(src, "Need to transfer - File not found at Destination") fs.Debugf(src, "Need to transfer - File not found at Destination")
return true return true
} }
// If we should ignore existing files, don't transfer // If we should ignore existing files, don't transfer
if fs.Config.IgnoreExisting { if ci.IgnoreExisting {
fs.Debugf(src, "Destination exists, skipping") fs.Debugf(src, "Destination exists, skipping")
return false return false
} }
// If we should upload unconditionally // If we should upload unconditionally
if fs.Config.IgnoreTimes { if ci.IgnoreTimes {
fs.Debugf(src, "Transferring unconditionally as --ignore-times is in use") fs.Debugf(src, "Transferring unconditionally as --ignore-times is in use")
return true return true
} }
// If UpdateOlder is in effect, skip if dst is newer than src // If UpdateOlder is in effect, skip if dst is newer than src
if fs.Config.UpdateOlder { if ci.UpdateOlder {
srcModTime := src.ModTime(ctx) srcModTime := src.ModTime(ctx)
dstModTime := dst.ModTime(ctx) dstModTime := dst.ModTime(ctx)
dt := dstModTime.Sub(srcModTime) dt := dstModTime.Sub(srcModTime)
@ -1411,7 +1430,7 @@ func NeedTransfer(ctx context.Context, dst, src fs.Object) bool {
return false return false
case dt <= -modifyWindow: case dt <= -modifyWindow:
// force --checksum on for the check and do update modtimes by default // force --checksum on for the check and do update modtimes by default
opt := defaultEqualOpt() opt := defaultEqualOpt(ctx)
opt.forceModTimeMatch = true opt.forceModTimeMatch = true
if equal(ctx, src, dst, opt) { if equal(ctx, src, dst, opt) {
fs.Debugf(src, "Unchanged skipping") fs.Debugf(src, "Unchanged skipping")
@ -1419,8 +1438,8 @@ func NeedTransfer(ctx context.Context, dst, src fs.Object) bool {
} }
default: default:
// Do a size only compare unless --checksum is set // Do a size only compare unless --checksum is set
opt := defaultEqualOpt() opt := defaultEqualOpt(ctx)
opt.sizeOnly = !fs.Config.CheckSum opt.sizeOnly = !ci.CheckSum
if equal(ctx, src, dst, opt) { if equal(ctx, src, dst, opt) {
fs.Debugf(src, "Destination mod time is within %v of source and files identical, skipping", modifyWindow) fs.Debugf(src, "Destination mod time is within %v of source and files identical, skipping", modifyWindow)
return false return false
@ -1483,7 +1502,7 @@ type copyURLFunc func(ctx context.Context, dstFileName string, in io.ReadCloser,
// copyURLFn copies the data from the url to the function supplied // copyURLFn copies the data from the url to the function supplied
func copyURLFn(ctx context.Context, dstFileName string, url string, dstFileNameFromURL bool, fn copyURLFunc) (err error) { func copyURLFn(ctx context.Context, dstFileName string, url string, dstFileNameFromURL bool, fn copyURLFunc) (err error) {
client := fshttp.NewClient(fs.Config) client := fshttp.NewClient(fs.GetConfig(ctx))
resp, err := client.Get(url) resp, err := client.Get(url)
if err != nil { if err != nil {
return err return err
@ -1531,10 +1550,11 @@ func CopyURLToWriter(ctx context.Context, url string, out io.Writer) (err error)
// BackupDir returns the correctly configured --backup-dir // BackupDir returns the correctly configured --backup-dir
func BackupDir(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, srcFileName string) (backupDir fs.Fs, err error) { func BackupDir(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, srcFileName string) (backupDir fs.Fs, err error) {
if fs.Config.BackupDir != "" { ci := fs.GetConfig(ctx)
backupDir, err = cache.Get(ctx, fs.Config.BackupDir) if ci.BackupDir != "" {
backupDir, err = cache.Get(ctx, ci.BackupDir)
if err != nil { if err != nil {
return nil, fserrors.FatalError(errors.Errorf("Failed to make fs for --backup-dir %q: %v", fs.Config.BackupDir, err)) return nil, fserrors.FatalError(errors.Errorf("Failed to make fs for --backup-dir %q: %v", ci.BackupDir, err))
} }
if !SameConfig(fdst, backupDir) { if !SameConfig(fdst, backupDir) {
return nil, fserrors.FatalError(errors.New("parameter to --backup-dir has to be on the same remote as destination")) return nil, fserrors.FatalError(errors.New("parameter to --backup-dir has to be on the same remote as destination"))
@ -1547,7 +1567,7 @@ func BackupDir(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, srcFileName string)
return nil, fserrors.FatalError(errors.New("source and parameter to --backup-dir mustn't overlap")) return nil, fserrors.FatalError(errors.New("source and parameter to --backup-dir mustn't overlap"))
} }
} else { } else {
if fs.Config.Suffix == "" { if ci.Suffix == "" {
if SameDir(fdst, backupDir) { if SameDir(fdst, backupDir) {
return nil, fserrors.FatalError(errors.New("destination and parameter to --backup-dir mustn't be the same")) return nil, fserrors.FatalError(errors.New("destination and parameter to --backup-dir mustn't be the same"))
} }
@ -1556,7 +1576,7 @@ func BackupDir(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, srcFileName string)
} }
} }
} }
} else if fs.Config.Suffix != "" { } else if ci.Suffix != "" {
// --backup-dir is not set but --suffix is - use the destination as the backupDir // --backup-dir is not set but --suffix is - use the destination as the backupDir
backupDir = fdst backupDir = fdst
} else { } else {
@ -1570,7 +1590,7 @@ func BackupDir(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, srcFileName string)
// MoveBackupDir moves a file to the backup dir // MoveBackupDir moves a file to the backup dir
func MoveBackupDir(ctx context.Context, backupDir fs.Fs, dst fs.Object) (err error) { func MoveBackupDir(ctx context.Context, backupDir fs.Fs, dst fs.Object) (err error) {
remoteWithSuffix := SuffixName(dst.Remote()) remoteWithSuffix := SuffixName(ctx, dst.Remote())
overwritten, _ := backupDir.NewObject(ctx, remoteWithSuffix) overwritten, _ := backupDir.NewObject(ctx, remoteWithSuffix)
_, err = Move(ctx, backupDir, overwritten, remoteWithSuffix, dst) _, err = Move(ctx, backupDir, overwritten, remoteWithSuffix, dst)
return err return err
@ -1578,6 +1598,7 @@ func MoveBackupDir(ctx context.Context, backupDir fs.Fs, dst fs.Object) (err err
// moveOrCopyFile moves or copies a single file possibly to a new name // moveOrCopyFile moves or copies a single file possibly to a new name
func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string, cp bool) (err error) { func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string, cp bool) (err error) {
ci := fs.GetConfig(ctx)
dstFilePath := path.Join(fdst.Root(), dstFileName) dstFilePath := path.Join(fdst.Root(), dstFileName)
srcFilePath := path.Join(fsrc.Root(), srcFileName) srcFilePath := path.Join(fsrc.Root(), srcFileName)
if fdst.Name() == fsrc.Name() && dstFilePath == srcFilePath { if fdst.Name() == fsrc.Name() && dstFilePath == srcFilePath {
@ -1599,7 +1620,7 @@ func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName str
// Find dst object if it exists // Find dst object if it exists
var dstObj fs.Object var dstObj fs.Object
if !fs.Config.NoCheckDest { if !ci.NoCheckDest {
dstObj, err = fdst.NewObject(ctx, dstFileName) dstObj, err = fdst.NewObject(ctx, dstFileName)
if err == fs.ErrorObjectNotFound { if err == fs.ErrorObjectNotFound {
dstObj = nil dstObj = nil
@ -1635,18 +1656,18 @@ func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName str
} }
var backupDir, copyDestDir fs.Fs var backupDir, copyDestDir fs.Fs
if fs.Config.BackupDir != "" || fs.Config.Suffix != "" { if ci.BackupDir != "" || ci.Suffix != "" {
backupDir, err = BackupDir(ctx, fdst, fsrc, srcFileName) backupDir, err = BackupDir(ctx, fdst, fsrc, srcFileName)
if err != nil { if err != nil {
return errors.Wrap(err, "creating Fs for --backup-dir failed") return errors.Wrap(err, "creating Fs for --backup-dir failed")
} }
} }
if fs.Config.CompareDest != "" { if ci.CompareDest != "" {
copyDestDir, err = GetCompareDest(ctx) copyDestDir, err = GetCompareDest(ctx)
if err != nil { if err != nil {
return err return err
} }
} else if fs.Config.CopyDest != "" { } else if ci.CopyDest != "" {
copyDestDir, err = GetCopyDest(ctx, fdst) copyDestDir, err = GetCopyDest(ctx, fdst)
if err != nil { if err != nil {
return err return err
@ -1853,6 +1874,7 @@ func (l *ListFormat) Format(entry *ListJSONItem) (result string) {
// It does this by loading the directory tree into memory (using ListR // It does this by loading the directory tree into memory (using ListR
// if available) and doing renames in parallel. // if available) and doing renames in parallel.
func DirMove(ctx context.Context, f fs.Fs, srcRemote, dstRemote string) (err error) { func DirMove(ctx context.Context, f fs.Fs, srcRemote, dstRemote string) (err error) {
ci := fs.GetConfig(ctx)
// Use DirMove if possible // Use DirMove if possible
if doDirMove := f.Features().DirMove; doDirMove != nil { if doDirMove := f.Features().DirMove; doDirMove != nil {
err = doDirMove(ctx, f, srcRemote, dstRemote) err = doDirMove(ctx, f, srcRemote, dstRemote)
@ -1885,9 +1907,9 @@ func DirMove(ctx context.Context, f fs.Fs, srcRemote, dstRemote string) (err err
o fs.Object o fs.Object
newPath string newPath string
} }
renames := make(chan rename, fs.Config.Transfers) renames := make(chan rename, ci.Transfers)
g, gCtx := errgroup.WithContext(context.Background()) g, gCtx := errgroup.WithContext(context.Background())
for i := 0; i < fs.Config.Transfers; i++ { for i := 0; i < ci.Transfers; i++ {
g.Go(func() error { g.Go(func() error {
for job := range renames { for job := range renames {
dstOverwritten, _ := f.NewObject(gCtx, job.newPath) dstOverwritten, _ := f.NewObject(gCtx, job.newPath)
@ -2019,11 +2041,12 @@ func skipDestructiveChoose(ctx context.Context, subject interface{}, action stri
// to action subject". // to action subject".
func SkipDestructive(ctx context.Context, subject interface{}, action string) (skip bool) { func SkipDestructive(ctx context.Context, subject interface{}, action string) (skip bool) {
var flag string var flag string
ci := fs.GetConfig(ctx)
switch { switch {
case fs.Config.DryRun: case ci.DryRun:
flag = "--dry-run" flag = "--dry-run"
skip = true skip = true
case fs.Config.Interactive: case ci.Interactive:
flag = "--interactive" flag = "--interactive"
interactiveMu.Lock() interactiveMu.Lock()
defer interactiveMu.Unlock() defer interactiveMu.Unlock()
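SkipDestructive now reads DryRun and Interactive from the context, so a dry run can be limited to one operation while others in the same process write normally. A sketch under the same fs.AddConfig assumption (helper name hypothetical):

func wouldDelete(ctx context.Context, o fs.Object) bool {
	dryCtx, ci := fs.AddConfig(ctx)
	ci.DryRun = true
	// with DryRun set, SkipDestructive logs the skipped action
	// and reports true without touching the object
	return SkipDestructive(dryCtx, o, "delete")
}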


@ -3,6 +3,7 @@
package operations package operations
import ( import (
"context"
"fmt" "fmt"
"testing" "testing"
"time" "time"
@ -13,6 +14,8 @@ import (
) )
func TestSizeDiffers(t *testing.T) { func TestSizeDiffers(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
when := time.Now() when := time.Now()
for _, test := range []struct { for _, test := range []struct {
ignoreSize bool ignoreSize bool
@ -31,10 +34,10 @@ func TestSizeDiffers(t *testing.T) {
} { } {
src := object.NewStaticObjectInfo("a", when, test.srcSize, true, nil, nil) src := object.NewStaticObjectInfo("a", when, test.srcSize, true, nil, nil)
dst := object.NewStaticObjectInfo("a", when, test.dstSize, true, nil, nil) dst := object.NewStaticObjectInfo("a", when, test.dstSize, true, nil, nil)
oldIgnoreSize := fs.Config.IgnoreSize oldIgnoreSize := ci.IgnoreSize
fs.Config.IgnoreSize = test.ignoreSize ci.IgnoreSize = test.ignoreSize
got := sizeDiffers(src, dst) got := sizeDiffers(ctx, src, dst)
fs.Config.IgnoreSize = oldIgnoreSize ci.IgnoreSize = oldIgnoreSize
assert.Equal(t, test.want, got, fmt.Sprintf("ignoreSize=%v, srcSize=%v, dstSize=%v", test.ignoreSize, test.srcSize, test.dstSize)) assert.Equal(t, test.want, got, fmt.Sprintf("ignoreSize=%v, srcSize=%v, dstSize=%v", test.ignoreSize, test.srcSize, test.dstSize))
} }
} }
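Note that with a bare context.Background(), fs.GetConfig hands back the process-wide default config rather than a private copy, which is why these tests still save and restore every field they touch. A hedged variant of that bookkeeping using t.Cleanup (test name hypothetical; assumes Go 1.14+):

func TestWithIgnoreSize(t *testing.T) {
	ctx := context.Background()
	ci := fs.GetConfig(ctx) // the shared defaults, not a copy
	old := ci.IgnoreSize
	ci.IgnoreSize = true
	t.Cleanup(func() { ci.IgnoreSize = old }) // restore for later tests
	// ... exercise sizeDiffers(ctx, src, dst) as above
}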


@ -106,6 +106,7 @@ func TestLs(t *testing.T) {
func TestLsWithFilesFrom(t *testing.T) { func TestLsWithFilesFrom(t *testing.T) {
ctx := context.Background() ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t) r := fstest.NewRun(t)
defer r.Finalise() defer r.Finalise()
file1 := r.WriteBoth(ctx, "potato2", "------------------------------------------------------------", t1) file1 := r.WriteBoth(ctx, "potato2", "------------------------------------------------------------", t1)
@ -132,10 +133,10 @@ func TestLsWithFilesFrom(t *testing.T) {
assert.Equal(t, " 60 potato2\n", buf.String()) assert.Equal(t, " 60 potato2\n", buf.String())
// Now try with --no-traverse // Now try with --no-traverse
oldNoTraverse := fs.Config.NoTraverse oldNoTraverse := ci.NoTraverse
fs.Config.NoTraverse = true ci.NoTraverse = true
defer func() { defer func() {
fs.Config.NoTraverse = oldNoTraverse ci.NoTraverse = oldNoTraverse
}() }()
buf.Reset() buf.Reset()
@ -269,9 +270,11 @@ func TestHashSums(t *testing.T) {
} }
func TestSuffixName(t *testing.T) { func TestSuffixName(t *testing.T) {
origSuffix, origKeepExt := fs.Config.Suffix, fs.Config.SuffixKeepExtension ctx := context.Background()
ci := fs.GetConfig(ctx)
origSuffix, origKeepExt := ci.Suffix, ci.SuffixKeepExtension
defer func() { defer func() {
fs.Config.Suffix, fs.Config.SuffixKeepExtension = origSuffix, origKeepExt ci.Suffix, ci.SuffixKeepExtension = origSuffix, origKeepExt
}() }()
for _, test := range []struct { for _, test := range []struct {
remote string remote string
@ -288,15 +291,16 @@ func TestSuffixName(t *testing.T) {
{"test", "-suffix", false, "test-suffix"}, {"test", "-suffix", false, "test-suffix"},
{"test", "-suffix", true, "test-suffix"}, {"test", "-suffix", true, "test-suffix"},
} { } {
fs.Config.Suffix = test.suffix ci.Suffix = test.suffix
fs.Config.SuffixKeepExtension = test.keepExt ci.SuffixKeepExtension = test.keepExt
got := operations.SuffixName(test.remote) got := operations.SuffixName(ctx, test.remote)
assert.Equal(t, test.want, got, fmt.Sprintf("%+v", test)) assert.Equal(t, test.want, got, fmt.Sprintf("%+v", test))
} }
} }
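SuffixName now takes the context so it can read Suffix and SuffixKeepExtension per call rather than from a global. A usage sketch with illustrative values, again assuming fs.AddConfig; the expected output follows from the test table above:

func ExampleSuffixName() {
	ctx, ci := fs.AddConfig(context.Background())
	ci.Suffix = ".bak"
	ci.SuffixKeepExtension = true
	fmt.Println(operations.SuffixName(ctx, "file.txt"))
	// Output: file.bak.txt — the suffix is inserted before the extension
}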
func TestCount(t *testing.T) { func TestCount(t *testing.T) {
ctx := context.Background() ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t) r := fstest.NewRun(t)
defer r.Finalise() defer r.Finalise()
file1 := r.WriteBoth(ctx, "potato2", "------------------------------------------------------------", t1) file1 := r.WriteBoth(ctx, "potato2", "------------------------------------------------------------", t1)
@ -306,8 +310,8 @@ func TestCount(t *testing.T) {
fstest.CheckItems(t, r.Fremote, file1, file2, file3) fstest.CheckItems(t, r.Fremote, file1, file2, file3)
// Check the MaxDepth too // Check the MaxDepth too
fs.Config.MaxDepth = 1 ci.MaxDepth = 1
defer func() { fs.Config.MaxDepth = -1 }() defer func() { ci.MaxDepth = -1 }()
objects, size, err := operations.Count(ctx, r.Fremote) objects, size, err := operations.Count(ctx, r.Fremote)
require.NoError(t, err) require.NoError(t, err)
@ -583,6 +587,7 @@ func TestRmdirsLeaveRoot(t *testing.T) {
func TestCopyURL(t *testing.T) { func TestCopyURL(t *testing.T) {
ctx := context.Background() ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t) r := fstest.NewRun(t)
defer r.Finalise() defer r.Finalise()
@ -635,10 +640,10 @@ func TestCopyURL(t *testing.T) {
status = 0 status = 0
// check when reading from unverified HTTPS server // check when reading from unverified HTTPS server
fs.Config.InsecureSkipVerify = true ci.InsecureSkipVerify = true
fshttp.ResetTransport() fshttp.ResetTransport()
defer func() { defer func() {
fs.Config.InsecureSkipVerify = false ci.InsecureSkipVerify = false
fshttp.ResetTransport() fshttp.ResetTransport()
}() }()
tss := httptest.NewTLSServer(handler) tss := httptest.NewTLSServer(handler)
@ -750,16 +755,17 @@ func TestCaseInsensitiveMoveFile(t *testing.T) {
func TestMoveFileBackupDir(t *testing.T) { func TestMoveFileBackupDir(t *testing.T) {
ctx := context.Background() ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t) r := fstest.NewRun(t)
defer r.Finalise() defer r.Finalise()
if !operations.CanServerSideMove(r.Fremote) { if !operations.CanServerSideMove(r.Fremote) {
t.Skip("Skipping test as remote does not support server-side move or copy") t.Skip("Skipping test as remote does not support server-side move or copy")
} }
oldBackupDir := fs.Config.BackupDir oldBackupDir := ci.BackupDir
fs.Config.BackupDir = r.FremoteName + "/backup" ci.BackupDir = r.FremoteName + "/backup"
defer func() { defer func() {
fs.Config.BackupDir = oldBackupDir ci.BackupDir = oldBackupDir
}() }()
file1 := r.WriteFile("dst/file1", "file1 contents", t1) file1 := r.WriteFile("dst/file1", "file1 contents", t1)
@ -804,16 +810,17 @@ func TestCopyFile(t *testing.T) {
func TestCopyFileBackupDir(t *testing.T) { func TestCopyFileBackupDir(t *testing.T) {
ctx := context.Background() ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t) r := fstest.NewRun(t)
defer r.Finalise() defer r.Finalise()
if !operations.CanServerSideMove(r.Fremote) { if !operations.CanServerSideMove(r.Fremote) {
t.Skip("Skipping test as remote does not support server-side move or copy") t.Skip("Skipping test as remote does not support server-side move or copy")
} }
oldBackupDir := fs.Config.BackupDir oldBackupDir := ci.BackupDir
fs.Config.BackupDir = r.FremoteName + "/backup" ci.BackupDir = r.FremoteName + "/backup"
defer func() { defer func() {
fs.Config.BackupDir = oldBackupDir ci.BackupDir = oldBackupDir
}() }()
file1 := r.WriteFile("dst/file1", "file1 contents", t1) file1 := r.WriteFile("dst/file1", "file1 contents", t1)
@ -832,12 +839,13 @@ func TestCopyFileBackupDir(t *testing.T) {
// Test with CompareDest set // Test with CompareDest set
func TestCopyFileCompareDest(t *testing.T) { func TestCopyFileCompareDest(t *testing.T) {
ctx := context.Background() ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t) r := fstest.NewRun(t)
defer r.Finalise() defer r.Finalise()
fs.Config.CompareDest = r.FremoteName + "/CompareDest" ci.CompareDest = r.FremoteName + "/CompareDest"
defer func() { defer func() {
fs.Config.CompareDest = "" ci.CompareDest = ""
}() }()
fdst, err := fs.NewFs(ctx, r.FremoteName+"/dst") fdst, err := fs.NewFs(ctx, r.FremoteName+"/dst")
require.NoError(t, err) require.NoError(t, err)
@ -913,6 +921,7 @@ func TestCopyFileCompareDest(t *testing.T) {
// Test with CopyDest set // Test with CopyDest set
func TestCopyFileCopyDest(t *testing.T) { func TestCopyFileCopyDest(t *testing.T) {
ctx := context.Background() ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t) r := fstest.NewRun(t)
defer r.Finalise() defer r.Finalise()
@ -920,9 +929,9 @@ func TestCopyFileCopyDest(t *testing.T) {
t.Skip("Skipping test as remote does not support server-side copy") t.Skip("Skipping test as remote does not support server-side copy")
} }
fs.Config.CopyDest = r.FremoteName + "/CopyDest" ci.CopyDest = r.FremoteName + "/CopyDest"
defer func() { defer func() {
fs.Config.CopyDest = "" ci.CopyDest = ""
}() }()
fdst, err := fs.NewFs(ctx, r.FremoteName+"/dst") fdst, err := fs.NewFs(ctx, r.FremoteName+"/dst")
@ -955,7 +964,7 @@ func TestCopyFileCopyDest(t *testing.T) {
// check old dest, new copy, backup-dir // check old dest, new copy, backup-dir
fs.Config.BackupDir = r.FremoteName + "/BackupDir" ci.BackupDir = r.FremoteName + "/BackupDir"
file3 := r.WriteObject(ctx, "dst/one", "one", t1) file3 := r.WriteObject(ctx, "dst/one", "one", t1)
file2 := r.WriteObject(ctx, "CopyDest/one", "onet2", t2) file2 := r.WriteObject(ctx, "CopyDest/one", "onet2", t2)
@ -971,7 +980,7 @@ func TestCopyFileCopyDest(t *testing.T) {
file3.Path = "BackupDir/one" file3.Path = "BackupDir/one"
fstest.CheckItems(t, r.Fremote, file2, file2dst, file3) fstest.CheckItems(t, r.Fremote, file2, file2dst, file3)
fs.Config.BackupDir = "" ci.BackupDir = ""
// check empty dest, new copy // check empty dest, new copy
file4 := r.WriteObject(ctx, "CopyDest/two", "two", t2) file4 := r.WriteObject(ctx, "CopyDest/two", "two", t2)
@ -1329,11 +1338,13 @@ func TestGetFsInfo(t *testing.T) {
} }
func TestRcat(t *testing.T) { func TestRcat(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
check := func(withChecksum, ignoreChecksum bool) { check := func(withChecksum, ignoreChecksum bool) {
checksumBefore, ignoreChecksumBefore := fs.Config.CheckSum, fs.Config.IgnoreChecksum checksumBefore, ignoreChecksumBefore := ci.CheckSum, ci.IgnoreChecksum
fs.Config.CheckSum, fs.Config.IgnoreChecksum = withChecksum, ignoreChecksum ci.CheckSum, ci.IgnoreChecksum = withChecksum, ignoreChecksum
defer func() { defer func() {
fs.Config.CheckSum, fs.Config.IgnoreChecksum = checksumBefore, ignoreChecksumBefore ci.CheckSum, ci.IgnoreChecksum = checksumBefore, ignoreChecksumBefore
}() }()
var prefix string var prefix string
@ -1350,13 +1361,13 @@ func TestRcat(t *testing.T) {
r := fstest.NewRun(t) r := fstest.NewRun(t)
defer r.Finalise() defer r.Finalise()
if *fstest.SizeLimit > 0 && int64(fs.Config.StreamingUploadCutoff) > *fstest.SizeLimit { if *fstest.SizeLimit > 0 && int64(ci.StreamingUploadCutoff) > *fstest.SizeLimit {
savedCutoff := fs.Config.StreamingUploadCutoff savedCutoff := ci.StreamingUploadCutoff
defer func() { defer func() {
fs.Config.StreamingUploadCutoff = savedCutoff ci.StreamingUploadCutoff = savedCutoff
}() }()
fs.Config.StreamingUploadCutoff = fs.SizeSuffix(*fstest.SizeLimit) ci.StreamingUploadCutoff = fs.SizeSuffix(*fstest.SizeLimit)
t.Logf("Adjust StreamingUploadCutoff to size limit %s (was %s)", fs.Config.StreamingUploadCutoff, savedCutoff) t.Logf("Adjust StreamingUploadCutoff to size limit %s (was %s)", ci.StreamingUploadCutoff, savedCutoff)
} }
fstest.CheckListing(t, r.Fremote, []fstest.Item{}) fstest.CheckListing(t, r.Fremote, []fstest.Item{})
@ -1364,7 +1375,7 @@ func TestRcat(t *testing.T) {
data1 := "this is some really nice test data" data1 := "this is some really nice test data"
path1 := prefix + "small_file_from_pipe" path1 := prefix + "small_file_from_pipe"
data2 := string(make([]byte, fs.Config.StreamingUploadCutoff+1)) data2 := string(make([]byte, ci.StreamingUploadCutoff+1))
path2 := prefix + "big_file_from_pipe" path2 := prefix + "big_file_from_pipe"
in := ioutil.NopCloser(strings.NewReader(data1)) in := ioutil.NopCloser(strings.NewReader(data1))
@ -1418,14 +1429,15 @@ func TestRcatSize(t *testing.T) {
func TestCopyFileMaxTransfer(t *testing.T) { func TestCopyFileMaxTransfer(t *testing.T) {
ctx := context.Background() ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t) r := fstest.NewRun(t)
defer r.Finalise() defer r.Finalise()
old := fs.Config.MaxTransfer old := ci.MaxTransfer
oldMode := fs.Config.CutoffMode oldMode := ci.CutoffMode
defer func() { defer func() {
fs.Config.MaxTransfer = old ci.MaxTransfer = old
fs.Config.CutoffMode = oldMode ci.CutoffMode = oldMode
accounting.Stats(ctx).ResetCounters() accounting.Stats(ctx).ResetCounters()
}() }()
@ -1436,8 +1448,8 @@ func TestCopyFileMaxTransfer(t *testing.T) {
file4 := r.WriteFile("TestCopyFileMaxTransfer/file4", "file4 contents"+random.String(sizeCutoff), t2) file4 := r.WriteFile("TestCopyFileMaxTransfer/file4", "file4 contents"+random.String(sizeCutoff), t2)
// Cutoff mode: Hard // Cutoff mode: Hard
fs.Config.MaxTransfer = sizeCutoff ci.MaxTransfer = sizeCutoff
fs.Config.CutoffMode = fs.CutoffModeHard ci.CutoffMode = fs.CutoffModeHard
// file1: Show a small file gets transferred OK // file1: Show a small file gets transferred OK
accounting.Stats(ctx).ResetCounters() accounting.Stats(ctx).ResetCounters()
@ -1456,7 +1468,7 @@ func TestCopyFileMaxTransfer(t *testing.T) {
fstest.CheckItems(t, r.Fremote, file1) fstest.CheckItems(t, r.Fremote, file1)
// Cutoff mode: Cautious // Cutoff mode: Cautious
fs.Config.CutoffMode = fs.CutoffModeCautious ci.CutoffMode = fs.CutoffModeCautious
// file3: show a large file does not get transferred // file3: show a large file does not get transferred
accounting.Stats(ctx).ResetCounters() accounting.Stats(ctx).ResetCounters()
@ -1473,7 +1485,7 @@ func TestCopyFileMaxTransfer(t *testing.T) {
} }
// Cutoff mode: Soft // Cutoff mode: Soft
fs.Config.CutoffMode = fs.CutoffModeSoft ci.CutoffMode = fs.CutoffModeSoft
// file4: show a large file does get transferred this time // file4: show a large file does get transferred this time
accounting.Stats(ctx).ResetCounters() accounting.Stats(ctx).ResetCounters()
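TestCopyFileMaxTransfer steps the context's config through the three cutoff modes between runs. Outside of tests the same limit can be scoped to a single copy; a hedged sketch with an illustrative 10 MiB cap (helper name hypothetical, operations.CopyFile being the call these tests exercise):

func copyWithCap(ctx context.Context, fdst, fsrc fs.Fs, name string) error {
	capCtx, ci := fs.AddConfig(ctx)
	ci.MaxTransfer = fs.SizeSuffix(10 << 20) // 10 MiB, illustrative
	ci.CutoffMode = fs.CutoffModeSoft        // soft: start no new transfers at the limit
	return operations.CopyFile(capCtx, fdst, fsrc, name, name)
}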


@ -75,10 +75,12 @@ func TestOptionsGet(t *testing.T) {
func TestOptionsGetMarshal(t *testing.T) { func TestOptionsGetMarshal(t *testing.T) {
defer clearOptionBlock()() defer clearOptionBlock()()
ctx := context.Background()
ci := fs.GetConfig(ctx)
// Add some real options // Add some real options
AddOption("http", &httplib.DefaultOpt) AddOption("http", &httplib.DefaultOpt)
AddOption("main", fs.Config) AddOption("main", ci)
AddOption("rc", &DefaultOpt) AddOption("rc", &DefaultOpt)
// get them // get them
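Registering ci here rather than the old fs.Config global means the rc layer serialises whatever config the current context resolves to. A sketch of the registration as exercised above (assuming, as the test implies, that AddOption records the value it is handed for later options/get calls):

func registerMainOptions(ctx context.Context) {
	ci := fs.GetConfig(ctx)
	AddOption("main", ci) // later "options/get" calls report these values
}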


@ -30,6 +30,7 @@ type syncCopyMove struct {
deleteEmptySrcDirs bool deleteEmptySrcDirs bool
dir string dir string
// internal state // internal state
ci *fs.ConfigInfo // global config
ctx context.Context // internal context for controlling go-routines ctx context.Context // internal context for controlling go-routines
cancel func() // cancel the context cancel func() // cancel the context
inCtx context.Context // internal context for controlling march inCtx context.Context // internal context for controlling march
@ -97,7 +98,9 @@ func newSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.Delete
if (deleteMode != fs.DeleteModeOff || DoMove) && operations.Overlapping(fdst, fsrc) { if (deleteMode != fs.DeleteModeOff || DoMove) && operations.Overlapping(fdst, fsrc) {
return nil, fserrors.FatalError(fs.ErrorOverlapping) return nil, fserrors.FatalError(fs.ErrorOverlapping)
} }
ci := fs.GetConfig(ctx)
s := &syncCopyMove{ s := &syncCopyMove{
ci: ci,
fdst: fdst, fdst: fdst,
fsrc: fsrc, fsrc: fsrc,
deleteMode: deleteMode, deleteMode: deleteMode,
@ -105,42 +108,42 @@ func newSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.Delete
copyEmptySrcDirs: copyEmptySrcDirs, copyEmptySrcDirs: copyEmptySrcDirs,
deleteEmptySrcDirs: deleteEmptySrcDirs, deleteEmptySrcDirs: deleteEmptySrcDirs,
dir: "", dir: "",
srcFilesChan: make(chan fs.Object, fs.Config.Checkers+fs.Config.Transfers), srcFilesChan: make(chan fs.Object, ci.Checkers+ci.Transfers),
srcFilesResult: make(chan error, 1), srcFilesResult: make(chan error, 1),
dstFilesResult: make(chan error, 1), dstFilesResult: make(chan error, 1),
dstEmptyDirs: make(map[string]fs.DirEntry), dstEmptyDirs: make(map[string]fs.DirEntry),
srcEmptyDirs: make(map[string]fs.DirEntry), srcEmptyDirs: make(map[string]fs.DirEntry),
noTraverse: fs.Config.NoTraverse, noTraverse: ci.NoTraverse,
noCheckDest: fs.Config.NoCheckDest, noCheckDest: ci.NoCheckDest,
noUnicodeNormalization: fs.Config.NoUnicodeNormalization, noUnicodeNormalization: ci.NoUnicodeNormalization,
deleteFilesCh: make(chan fs.Object, fs.Config.Checkers), deleteFilesCh: make(chan fs.Object, ci.Checkers),
trackRenames: fs.Config.TrackRenames, trackRenames: ci.TrackRenames,
commonHash: fsrc.Hashes().Overlap(fdst.Hashes()).GetOne(), commonHash: fsrc.Hashes().Overlap(fdst.Hashes()).GetOne(),
modifyWindow: fs.GetModifyWindow(ctx, fsrc, fdst), modifyWindow: fs.GetModifyWindow(ctx, fsrc, fdst),
trackRenamesCh: make(chan fs.Object, fs.Config.Checkers), trackRenamesCh: make(chan fs.Object, ci.Checkers),
checkFirst: fs.Config.CheckFirst, checkFirst: ci.CheckFirst,
} }
backlog := fs.Config.MaxBacklog backlog := ci.MaxBacklog
if s.checkFirst { if s.checkFirst {
fs.Infof(s.fdst, "Running all checks before starting transfers") fs.Infof(s.fdst, "Running all checks before starting transfers")
backlog = -1 backlog = -1
} }
var err error var err error
s.toBeChecked, err = newPipe(fs.Config.OrderBy, accounting.Stats(ctx).SetCheckQueue, backlog) s.toBeChecked, err = newPipe(ci.OrderBy, accounting.Stats(ctx).SetCheckQueue, backlog)
if err != nil { if err != nil {
return nil, err return nil, err
} }
s.toBeUploaded, err = newPipe(fs.Config.OrderBy, accounting.Stats(ctx).SetTransferQueue, backlog) s.toBeUploaded, err = newPipe(ci.OrderBy, accounting.Stats(ctx).SetTransferQueue, backlog)
if err != nil { if err != nil {
return nil, err return nil, err
} }
s.toBeRenamed, err = newPipe(fs.Config.OrderBy, accounting.Stats(ctx).SetRenameQueue, backlog) s.toBeRenamed, err = newPipe(ci.OrderBy, accounting.Stats(ctx).SetRenameQueue, backlog)
if err != nil { if err != nil {
return nil, err return nil, err
} }
// If a max session duration has been defined add a deadline to the context // If a max session duration has been defined add a deadline to the context
if fs.Config.MaxDuration > 0 { if ci.MaxDuration > 0 {
endTime := time.Now().Add(fs.Config.MaxDuration) endTime := time.Now().Add(ci.MaxDuration)
fs.Infof(s.fdst, "Transfer session deadline: %s", endTime.Format("2006/01/02 15:04:05")) fs.Infof(s.fdst, "Transfer session deadline: %s", endTime.Format("2006/01/02 15:04:05"))
s.ctx, s.cancel = context.WithDeadline(ctx, endTime) s.ctx, s.cancel = context.WithDeadline(ctx, endTime)
} else { } else {
@ -152,7 +155,7 @@ func newSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.Delete
fs.Errorf(nil, "Ignoring --no-traverse with sync") fs.Errorf(nil, "Ignoring --no-traverse with sync")
s.noTraverse = false s.noTraverse = false
} }
s.trackRenamesStrategy, err = parseTrackRenamesStrategy(fs.Config.TrackRenamesStrategy) s.trackRenamesStrategy, err = parseTrackRenamesStrategy(ci.TrackRenamesStrategy)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -160,7 +163,7 @@ func newSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.Delete
if s.deleteMode != fs.DeleteModeOff { if s.deleteMode != fs.DeleteModeOff {
return nil, errors.New("can't use --no-check-dest with sync: use copy instead") return nil, errors.New("can't use --no-check-dest with sync: use copy instead")
} }
if fs.Config.Immutable { if ci.Immutable {
return nil, errors.New("can't use --no-check-dest with --immutable") return nil, errors.New("can't use --no-check-dest with --immutable")
} }
if s.backupDir != nil { if s.backupDir != nil {
@ -199,20 +202,20 @@ func newSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.Delete
} }
} }
// Make Fs for --backup-dir if required // Make Fs for --backup-dir if required
if fs.Config.BackupDir != "" || fs.Config.Suffix != "" { if ci.BackupDir != "" || ci.Suffix != "" {
var err error var err error
s.backupDir, err = operations.BackupDir(ctx, fdst, fsrc, "") s.backupDir, err = operations.BackupDir(ctx, fdst, fsrc, "")
if err != nil { if err != nil {
return nil, err return nil, err
} }
} }
if fs.Config.CompareDest != "" { if ci.CompareDest != "" {
var err error var err error
s.compareCopyDest, err = operations.GetCompareDest(ctx) s.compareCopyDest, err = operations.GetCompareDest(ctx)
if err != nil { if err != nil {
return nil, err return nil, err
} }
} else if fs.Config.CopyDest != "" { } else if ci.CopyDest != "" {
var err error var err error
s.compareCopyDest, err = operations.GetCopyDest(ctx, fdst) s.compareCopyDest, err = operations.GetCopyDest(ctx, fdst)
if err != nil { if err != nil {
@ -312,7 +315,7 @@ func (s *syncCopyMove) pairChecker(in *pipe, out *pipe, fraction int, wg *sync.W
} }
if !NoNeedTransfer && operations.NeedTransfer(s.ctx, pair.Dst, pair.Src) { if !NoNeedTransfer && operations.NeedTransfer(s.ctx, pair.Dst, pair.Src) {
// If files are treated as immutable, fail if destination exists and does not match // If files are treated as immutable, fail if destination exists and does not match
if fs.Config.Immutable && pair.Dst != nil { if s.ci.Immutable && pair.Dst != nil {
fs.Errorf(pair.Dst, "Source and destination exist but do not match: immutable file modified") fs.Errorf(pair.Dst, "Source and destination exist but do not match: immutable file modified")
s.processError(fs.ErrorImmutableModified) s.processError(fs.ErrorImmutableModified)
} else { } else {
@ -389,9 +392,9 @@ func (s *syncCopyMove) pairCopyOrMove(ctx context.Context, in *pipe, fdst fs.Fs,
// This starts the background checkers. // This starts the background checkers.
func (s *syncCopyMove) startCheckers() { func (s *syncCopyMove) startCheckers() {
s.checkerWg.Add(fs.Config.Checkers) s.checkerWg.Add(s.ci.Checkers)
for i := 0; i < fs.Config.Checkers; i++ { for i := 0; i < s.ci.Checkers; i++ {
fraction := (100 * i) / fs.Config.Checkers fraction := (100 * i) / s.ci.Checkers
go s.pairChecker(s.toBeChecked, s.toBeUploaded, fraction, &s.checkerWg) go s.pairChecker(s.toBeChecked, s.toBeUploaded, fraction, &s.checkerWg)
} }
} }
@ -405,9 +408,9 @@ func (s *syncCopyMove) stopCheckers() {
// This starts the background transfers // This starts the background transfers
func (s *syncCopyMove) startTransfers() { func (s *syncCopyMove) startTransfers() {
s.transfersWg.Add(fs.Config.Transfers) s.transfersWg.Add(s.ci.Transfers)
for i := 0; i < fs.Config.Transfers; i++ { for i := 0; i < s.ci.Transfers; i++ {
fraction := (100 * i) / fs.Config.Transfers fraction := (100 * i) / s.ci.Transfers
go s.pairCopyOrMove(s.ctx, s.toBeUploaded, s.fdst, fraction, &s.transfersWg) go s.pairCopyOrMove(s.ctx, s.toBeUploaded, s.fdst, fraction, &s.transfersWg)
} }
} }
@ -424,9 +427,9 @@ func (s *syncCopyMove) startRenamers() {
if !s.trackRenames { if !s.trackRenames {
return return
} }
s.renamerWg.Add(fs.Config.Checkers) s.renamerWg.Add(s.ci.Checkers)
for i := 0; i < fs.Config.Checkers; i++ { for i := 0; i < s.ci.Checkers; i++ {
fraction := (100 * i) / fs.Config.Checkers fraction := (100 * i) / s.ci.Checkers
go s.pairRenamer(s.toBeRenamed, s.toBeUploaded, fraction, &s.renamerWg) go s.pairRenamer(s.toBeRenamed, s.toBeUploaded, fraction, &s.renamerWg)
} }
} }
@ -492,13 +495,13 @@ func (s *syncCopyMove) stopDeleters() {
// checkSrcMap is clear then it assumes that any source files that // checkSrcMap is clear then it assumes that any source files that
// have been found have been removed from dstFiles already. // have been found have been removed from dstFiles already.
func (s *syncCopyMove) deleteFiles(checkSrcMap bool) error { func (s *syncCopyMove) deleteFiles(checkSrcMap bool) error {
if accounting.Stats(s.ctx).Errored() && !fs.Config.IgnoreErrors { if accounting.Stats(s.ctx).Errored() && !s.ci.IgnoreErrors {
fs.Errorf(s.fdst, "%v", fs.ErrorNotDeleting) fs.Errorf(s.fdst, "%v", fs.ErrorNotDeleting)
return fs.ErrorNotDeleting return fs.ErrorNotDeleting
} }
// Delete the spare files // Delete the spare files
toDelete := make(fs.ObjectsChan, fs.Config.Transfers) toDelete := make(fs.ObjectsChan, s.ci.Transfers)
go func() { go func() {
outer: outer:
for remote, o := range s.dstFiles { for remote, o := range s.dstFiles {
@ -524,11 +527,11 @@ func (s *syncCopyMove) deleteFiles(checkSrcMap bool) error {
// This deletes the empty directories in the map passed in. It // This deletes the empty directories in the map passed in. It
// ignores any errors deleting directories // ignores any errors deleting directories
func deleteEmptyDirectories(ctx context.Context, f fs.Fs, entriesMap map[string]fs.DirEntry) error { func (s *syncCopyMove) deleteEmptyDirectories(ctx context.Context, f fs.Fs, entriesMap map[string]fs.DirEntry) error {
if len(entriesMap) == 0 { if len(entriesMap) == 0 {
return nil return nil
} }
if accounting.Stats(ctx).Errored() && !fs.Config.IgnoreErrors { if accounting.Stats(ctx).Errored() && !s.ci.IgnoreErrors {
fs.Errorf(f, "%v", fs.ErrorNotDeletingDirs) fs.Errorf(f, "%v", fs.ErrorNotDeletingDirs)
return fs.ErrorNotDeletingDirs return fs.ErrorNotDeletingDirs
} }
@ -729,14 +732,14 @@ func (s *syncCopyMove) makeRenameMap() {
} }
// pump all the dstFiles into in // pump all the dstFiles into in
in := make(chan fs.Object, fs.Config.Checkers) in := make(chan fs.Object, s.ci.Checkers)
go s.pumpMapToChan(s.dstFiles, in) go s.pumpMapToChan(s.dstFiles, in)
// now make a map of size,hash for all dstFiles // now make a map of size,hash for all dstFiles
s.renameMap = make(map[string][]fs.Object) s.renameMap = make(map[string][]fs.Object)
var wg sync.WaitGroup var wg sync.WaitGroup
wg.Add(fs.Config.Transfers) wg.Add(s.ci.Transfers)
for i := 0; i < fs.Config.Transfers; i++ { for i := 0; i < s.ci.Transfers; i++ {
go func() { go func() {
defer wg.Done() defer wg.Done()
for obj := range in { for obj := range in {
@ -829,7 +832,7 @@ func (s *syncCopyMove) run() error {
NoCheckDest: s.noCheckDest, NoCheckDest: s.noCheckDest,
NoUnicodeNormalization: s.noUnicodeNormalization, NoUnicodeNormalization: s.noUnicodeNormalization,
} }
s.processError(m.Run()) s.processError(m.Run(s.ctx))
s.stopTrackRenames() s.stopTrackRenames()
if s.trackRenames { if s.trackRenames {
@ -860,7 +863,7 @@ func (s *syncCopyMove) run() error {
// Delete files after // Delete files after
if s.deleteMode == fs.DeleteModeAfter { if s.deleteMode == fs.DeleteModeAfter {
if s.currentError() != nil && !fs.Config.IgnoreErrors { if s.currentError() != nil && !s.ci.IgnoreErrors {
fs.Errorf(s.fdst, "%v", fs.ErrorNotDeleting) fs.Errorf(s.fdst, "%v", fs.ErrorNotDeleting)
} else { } else {
s.processError(s.deleteFiles(false)) s.processError(s.deleteFiles(false))
@ -869,10 +872,10 @@ func (s *syncCopyMove) run() error {
// Prune empty directories // Prune empty directories
if s.deleteMode != fs.DeleteModeOff { if s.deleteMode != fs.DeleteModeOff {
if s.currentError() != nil && !fs.Config.IgnoreErrors { if s.currentError() != nil && !s.ci.IgnoreErrors {
fs.Errorf(s.fdst, "%v", fs.ErrorNotDeletingDirs) fs.Errorf(s.fdst, "%v", fs.ErrorNotDeletingDirs)
} else { } else {
s.processError(deleteEmptyDirectories(s.ctx, s.fdst, s.dstEmptyDirs)) s.processError(s.deleteEmptyDirectories(s.ctx, s.fdst, s.dstEmptyDirs))
} }
} }
@ -880,7 +883,7 @@ func (s *syncCopyMove) run() error {
// if DoMove and --delete-empty-src-dirs flag is set // if DoMove and --delete-empty-src-dirs flag is set
if s.DoMove && s.deleteEmptySrcDirs { if s.DoMove && s.deleteEmptySrcDirs {
// delete empty subdirectories that were part of the move // delete empty subdirectories that were part of the move
s.processError(deleteEmptyDirectories(s.ctx, s.fsrc, s.srcEmptyDirs)) s.processError(s.deleteEmptyDirectories(s.ctx, s.fsrc, s.srcEmptyDirs))
} }
// Read the error out of the context if there is one // Read the error out of the context if there is one
@ -1038,12 +1041,13 @@ func (s *syncCopyMove) Match(ctx context.Context, dst, src fs.DirEntry) (recurse
// //
// dir is the start directory, "" for root // dir is the start directory, "" for root
func runSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error { func runSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error {
ci := fs.GetConfig(ctx)
if deleteMode != fs.DeleteModeOff && DoMove { if deleteMode != fs.DeleteModeOff && DoMove {
return fserrors.FatalError(errors.New("can't delete and move at the same time")) return fserrors.FatalError(errors.New("can't delete and move at the same time"))
} }
// Run an extra pass to delete only // Run an extra pass to delete only
if deleteMode == fs.DeleteModeBefore { if deleteMode == fs.DeleteModeBefore {
if fs.Config.TrackRenames { if ci.TrackRenames {
return fserrors.FatalError(errors.New("can't use --delete-before with --track-renames")) return fserrors.FatalError(errors.New("can't use --delete-before with --track-renames"))
} }
// only delete stuff during in this pass // only delete stuff during in this pass
@ -1067,7 +1071,8 @@ func runSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.Delete
// Sync fsrc into fdst // Sync fsrc into fdst
func Sync(ctx context.Context, fdst, fsrc fs.Fs, copyEmptySrcDirs bool) error { func Sync(ctx context.Context, fdst, fsrc fs.Fs, copyEmptySrcDirs bool) error {
return runSyncCopyMove(ctx, fdst, fsrc, fs.Config.DeleteMode, false, false, copyEmptySrcDirs) ci := fs.GetConfig(ctx)
return runSyncCopyMove(ctx, fdst, fsrc, ci.DeleteMode, false, false, copyEmptySrcDirs)
} }
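Since runSyncCopyMove and Sync read everything from the context, two syncs with different settings can now run side by side in one process, which a single fs.Config global could not express. A hedged sketch (helper name hypothetical; the Fs handles are parameters, not values from this commit):

func runTwoSyncs(ctx context.Context, fdst1, fsrc1, fdst2, fsrc2 fs.Fs) error {
	g, gCtx := errgroup.WithContext(ctx)

	fastCtx, fastCI := fs.AddConfig(gCtx)
	fastCI.Transfers = 16 // more parallelism for one tree

	slowCtx, slowCI := fs.AddConfig(gCtx)
	slowCI.Transfers = 2
	slowCI.CheckFirst = true // run all checks before transferring

	g.Go(func() error { return Sync(fastCtx, fdst1, fsrc1, false) })
	g.Go(func() error { return Sync(slowCtx, fdst2, fsrc2, false) })
	return g.Wait()
}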
// CopyDir copies fsrc into fdst // CopyDir copies fsrc into fdst


@ -39,14 +39,15 @@ func TestMain(m *testing.M) {
// Check dry run is working // Check dry run is working
func TestCopyWithDryRun(t *testing.T) { func TestCopyWithDryRun(t *testing.T) {
ctx := context.Background() ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t) r := fstest.NewRun(t)
defer r.Finalise() defer r.Finalise()
file1 := r.WriteFile("sub dir/hello world", "hello world", t1) file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
r.Mkdir(ctx, r.Fremote) r.Mkdir(ctx, r.Fremote)
fs.Config.DryRun = true ci.DryRun = true
err := CopyDir(ctx, r.Fremote, r.Flocal, false) err := CopyDir(ctx, r.Fremote, r.Flocal, false)
fs.Config.DryRun = false ci.DryRun = false
require.NoError(t, err) require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1) fstest.CheckItems(t, r.Flocal, file1)
@ -86,11 +87,12 @@ func TestCopyMissingDirectory(t *testing.T) {
// Now with --no-traverse // Now with --no-traverse
func TestCopyNoTraverse(t *testing.T) { func TestCopyNoTraverse(t *testing.T) {
ctx := context.Background() ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t) r := fstest.NewRun(t)
defer r.Finalise() defer r.Finalise()
fs.Config.NoTraverse = true ci.NoTraverse = true
defer func() { fs.Config.NoTraverse = false }() defer func() { ci.NoTraverse = false }()
file1 := r.WriteFile("sub dir/hello world", "hello world", t1) file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
@ -104,11 +106,12 @@ func TestCopyNoTraverse(t *testing.T) {
// Now with --check-first // Now with --check-first
func TestCopyCheckFirst(t *testing.T) { func TestCopyCheckFirst(t *testing.T) {
ctx := context.Background() ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t) r := fstest.NewRun(t)
defer r.Finalise() defer r.Finalise()
fs.Config.CheckFirst = true ci.CheckFirst = true
defer func() { fs.Config.CheckFirst = false }() defer func() { ci.CheckFirst = false }()
file1 := r.WriteFile("sub dir/hello world", "hello world", t1) file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
@ -122,11 +125,12 @@ func TestCopyCheckFirst(t *testing.T) {
// Now with --no-traverse // Now with --no-traverse
func TestSyncNoTraverse(t *testing.T) { func TestSyncNoTraverse(t *testing.T) {
ctx := context.Background() ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t) r := fstest.NewRun(t)
defer r.Finalise() defer r.Finalise()
fs.Config.NoTraverse = true ci.NoTraverse = true
defer func() { fs.Config.NoTraverse = false }() defer func() { ci.NoTraverse = false }()
file1 := r.WriteFile("sub dir/hello world", "hello world", t1) file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
@ -141,14 +145,15 @@ func TestSyncNoTraverse(t *testing.T) {
// Test copy with depth // Test copy with depth
func TestCopyWithDepth(t *testing.T) { func TestCopyWithDepth(t *testing.T) {
ctx := context.Background() ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t) r := fstest.NewRun(t)
defer r.Finalise() defer r.Finalise()
file1 := r.WriteFile("sub dir/hello world", "hello world", t1) file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
file2 := r.WriteFile("hello world2", "hello world2", t2) file2 := r.WriteFile("hello world2", "hello world2", t2)
// Check the MaxDepth too // Check the MaxDepth too
fs.Config.MaxDepth = 1 ci.MaxDepth = 1
defer func() { fs.Config.MaxDepth = -1 }() defer func() { ci.MaxDepth = -1 }()
err := CopyDir(ctx, r.Fremote, r.Flocal, false) err := CopyDir(ctx, r.Fremote, r.Flocal, false)
require.NoError(t, err) require.NoError(t, err)
@ -160,6 +165,7 @@ func TestCopyWithDepth(t *testing.T) {
// Test copy with files from // Test copy with files from
func testCopyWithFilesFrom(t *testing.T, noTraverse bool) { func testCopyWithFilesFrom(t *testing.T, noTraverse bool) {
ctx := context.Background() ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t) r := fstest.NewRun(t)
defer r.Finalise() defer r.Finalise()
file1 := r.WriteFile("potato2", "hello world", t1) file1 := r.WriteFile("potato2", "hello world", t1)
@ -173,12 +179,12 @@ func testCopyWithFilesFrom(t *testing.T, noTraverse bool) {
// Monkey patch the active filter // Monkey patch the active filter
oldFilter := filter.Active oldFilter := filter.Active
oldNoTraverse := fs.Config.NoTraverse oldNoTraverse := ci.NoTraverse
filter.Active = f filter.Active = f
fs.Config.NoTraverse = noTraverse ci.NoTraverse = noTraverse
unpatch := func() { unpatch := func() {
filter.Active = oldFilter filter.Active = oldFilter
fs.Config.NoTraverse = oldNoTraverse ci.NoTraverse = oldNoTraverse
} }
defer unpatch() defer unpatch()
@ -332,10 +338,11 @@ func TestCopyRedownload(t *testing.T) {
// to be transferred on the second sync. // to be transferred on the second sync.
func TestSyncBasedOnCheckSum(t *testing.T) { func TestSyncBasedOnCheckSum(t *testing.T) {
ctx := context.Background() ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t) r := fstest.NewRun(t)
defer r.Finalise() defer r.Finalise()
fs.Config.CheckSum = true ci.CheckSum = true
defer func() { fs.Config.CheckSum = false }() defer func() { ci.CheckSum = false }()
file1 := r.WriteFile("check sum", "-", t1) file1 := r.WriteFile("check sum", "-", t1)
fstest.CheckItems(t, r.Flocal, file1) fstest.CheckItems(t, r.Flocal, file1)
@ -367,10 +374,11 @@ func TestSyncBasedOnCheckSum(t *testing.T) {
// only, we expect nothing to be transferred on the second sync. // only, we expect nothing to be transferred on the second sync.
func TestSyncSizeOnly(t *testing.T) { func TestSyncSizeOnly(t *testing.T) {
ctx := context.Background() ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t) r := fstest.NewRun(t)
defer r.Finalise() defer r.Finalise()
fs.Config.SizeOnly = true ci.SizeOnly = true
defer func() { fs.Config.SizeOnly = false }() defer func() { ci.SizeOnly = false }()
file1 := r.WriteFile("sizeonly", "potato", t1) file1 := r.WriteFile("sizeonly", "potato", t1)
fstest.CheckItems(t, r.Flocal, file1) fstest.CheckItems(t, r.Flocal, file1)
@ -402,10 +410,11 @@ func TestSyncSizeOnly(t *testing.T) {
// transferred on the second sync. // transferred on the second sync.
func TestSyncIgnoreSize(t *testing.T) { func TestSyncIgnoreSize(t *testing.T) {
ctx := context.Background() ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t) r := fstest.NewRun(t)
defer r.Finalise() defer r.Finalise()
fs.Config.IgnoreSize = true ci.IgnoreSize = true
defer func() { fs.Config.IgnoreSize = false }() defer func() { ci.IgnoreSize = false }()
file1 := r.WriteFile("ignore-size", "contents", t1) file1 := r.WriteFile("ignore-size", "contents", t1)
fstest.CheckItems(t, r.Flocal, file1) fstest.CheckItems(t, r.Flocal, file1)
@@ -434,6 +443,7 @@ func TestSyncIgnoreSize(t *testing.T) {
 func TestSyncIgnoreTimes(t *testing.T) {
 	ctx := context.Background()
+	ci := fs.GetConfig(ctx)
 	r := fstest.NewRun(t)
 	defer r.Finalise()
 	file1 := r.WriteBoth(ctx, "existing", "potato", t1)
@@ -447,8 +457,8 @@ func TestSyncIgnoreTimes(t *testing.T) {
 	// files were identical.
 	assert.Equal(t, int64(0), accounting.GlobalStats().GetTransfers())
-	fs.Config.IgnoreTimes = true
-	defer func() { fs.Config.IgnoreTimes = false }()
+	ci.IgnoreTimes = true
+	defer func() { ci.IgnoreTimes = false }()
 	accounting.GlobalStats().ResetCounters()
 	err = Sync(ctx, r.Fremote, r.Flocal, false)
@@ -464,12 +474,13 @@ func TestSyncIgnoreTimes(t *testing.T) {
 func TestSyncIgnoreExisting(t *testing.T) {
 	ctx := context.Background()
+	ci := fs.GetConfig(ctx)
 	r := fstest.NewRun(t)
 	defer r.Finalise()
 	file1 := r.WriteFile("existing", "potato", t1)
-	fs.Config.IgnoreExisting = true
-	defer func() { fs.Config.IgnoreExisting = false }()
+	ci.IgnoreExisting = true
+	defer func() { ci.IgnoreExisting = false }()
 	accounting.GlobalStats().ResetCounters()
 	err := Sync(ctx, r.Fremote, r.Flocal, false)
@@ -488,10 +499,11 @@ func TestSyncIgnoreExisting(t *testing.T) {
 func TestSyncIgnoreErrors(t *testing.T) {
 	ctx := context.Background()
+	ci := fs.GetConfig(ctx)
 	r := fstest.NewRun(t)
-	fs.Config.IgnoreErrors = true
+	ci.IgnoreErrors = true
 	defer func() {
-		fs.Config.IgnoreErrors = false
+		ci.IgnoreErrors = false
 		r.Finalise()
 	}()
 	file1 := r.WriteFile("a/potato2", "------------------------------------------------------------", t1)
@@ -561,6 +573,7 @@ func TestSyncIgnoreErrors(t *testing.T) {
 func TestSyncAfterChangingModtimeOnly(t *testing.T) {
 	ctx := context.Background()
+	ci := fs.GetConfig(ctx)
 	r := fstest.NewRun(t)
 	defer r.Finalise()
 	file1 := r.WriteFile("empty space", "-", t2)
@@ -569,8 +582,8 @@ func TestSyncAfterChangingModtimeOnly(t *testing.T) {
 	fstest.CheckItems(t, r.Flocal, file1)
 	fstest.CheckItems(t, r.Fremote, file2)
-	fs.Config.DryRun = true
-	defer func() { fs.Config.DryRun = false }()
+	ci.DryRun = true
+	defer func() { ci.DryRun = false }()
 	accounting.GlobalStats().ResetCounters()
 	err := Sync(ctx, r.Fremote, r.Flocal, false)
@@ -579,7 +592,7 @@ func TestSyncAfterChangingModtimeOnly(t *testing.T) {
 	fstest.CheckItems(t, r.Flocal, file1)
 	fstest.CheckItems(t, r.Fremote, file2)
-	fs.Config.DryRun = false
+	ci.DryRun = false
 	accounting.GlobalStats().ResetCounters()
 	err = Sync(ctx, r.Fremote, r.Flocal, false)
@@ -591,6 +604,7 @@ func TestSyncAfterChangingModtimeOnly(t *testing.T) {
 func TestSyncAfterChangingModtimeOnlyWithNoUpdateModTime(t *testing.T) {
 	ctx := context.Background()
+	ci := fs.GetConfig(ctx)
 	r := fstest.NewRun(t)
 	defer r.Finalise()
@@ -599,9 +613,9 @@ func TestSyncAfterChangingModtimeOnlyWithNoUpdateModTime(t *testing.T) {
 		return
 	}
-	fs.Config.NoUpdateModTime = true
+	ci.NoUpdateModTime = true
 	defer func() {
-		fs.Config.NoUpdateModTime = false
+		ci.NoUpdateModTime = false
 	}()
 	file1 := r.WriteFile("empty space", "-", t2)
@@ -703,16 +717,17 @@ func TestSyncAfterChangingContentsOnly(t *testing.T) {
 // Sync after removing a file and adding a file --dry-run
 func TestSyncAfterRemovingAFileAndAddingAFileDryRun(t *testing.T) {
 	ctx := context.Background()
+	ci := fs.GetConfig(ctx)
 	r := fstest.NewRun(t)
 	defer r.Finalise()
 	file1 := r.WriteFile("potato2", "------------------------------------------------------------", t1)
 	file2 := r.WriteObject(ctx, "potato", "SMALLER BUT SAME DATE", t2)
 	file3 := r.WriteBoth(ctx, "empty space", "-", t2)
-	fs.Config.DryRun = true
+	ci.DryRun = true
 	accounting.GlobalStats().ResetCounters()
 	err := Sync(ctx, r.Fremote, r.Flocal, false)
-	fs.Config.DryRun = false
+	ci.DryRun = false
 	require.NoError(t, err)
 	fstest.CheckItems(t, r.Flocal, file3, file1)
@@ -885,16 +900,20 @@ func TestSyncAfterRemovingAFileAndAddingAFileSubDirWithErrors(t *testing.T) {
 // Sync test delete after
 func TestSyncDeleteAfter(t *testing.T) {
+	ctx := context.Background()
+	ci := fs.GetConfig(ctx)
 	// This is the default so we've checked this already
 	// check it is the default
-	require.Equal(t, fs.Config.DeleteMode, fs.DeleteModeAfter, "Didn't default to --delete-after")
+	require.Equal(t, ci.DeleteMode, fs.DeleteModeAfter, "Didn't default to --delete-after")
 }
 // Sync test delete during
 func TestSyncDeleteDuring(t *testing.T) {
-	fs.Config.DeleteMode = fs.DeleteModeDuring
+	ctx := context.Background()
+	ci := fs.GetConfig(ctx)
+	ci.DeleteMode = fs.DeleteModeDuring
 	defer func() {
-		fs.Config.DeleteMode = fs.DeleteModeDefault
+		ci.DeleteMode = fs.DeleteModeDefault
 	}()
 	TestSyncAfterRemovingAFileAndAddingAFile(t)
@@ -902,9 +921,11 @@ func TestSyncDeleteDuring(t *testing.T) {
 // Sync test delete before
 func TestSyncDeleteBefore(t *testing.T) {
-	fs.Config.DeleteMode = fs.DeleteModeBefore
+	ctx := context.Background()
+	ci := fs.GetConfig(ctx)
+	ci.DeleteMode = fs.DeleteModeBefore
 	defer func() {
-		fs.Config.DeleteMode = fs.DeleteModeDefault
+		ci.DeleteMode = fs.DeleteModeDefault
 	}()
 	TestSyncAfterRemovingAFileAndAddingAFile(t)
@@ -913,12 +934,13 @@ func TestSyncDeleteBefore(t *testing.T) {
 // Copy test delete before - shouldn't delete anything
 func TestCopyDeleteBefore(t *testing.T) {
 	ctx := context.Background()
+	ci := fs.GetConfig(ctx)
 	r := fstest.NewRun(t)
 	defer r.Finalise()
-	fs.Config.DeleteMode = fs.DeleteModeBefore
+	ci.DeleteMode = fs.DeleteModeBefore
 	defer func() {
-		fs.Config.DeleteMode = fs.DeleteModeDefault
+		ci.DeleteMode = fs.DeleteModeDefault
 	}()
 	file1 := r.WriteObject(ctx, "potato", "hopefully not deleted", t1)
@@ -997,6 +1019,7 @@ func TestSyncWithExcludeAndDeleteExcluded(t *testing.T) {
 // Test with UpdateOlder set
 func TestSyncWithUpdateOlder(t *testing.T) {
 	ctx := context.Background()
+	ci := fs.GetConfig(ctx)
 	r := fstest.NewRun(t)
 	defer r.Finalise()
 	if fs.GetModifyWindow(ctx, r.Fremote) == fs.ModTimeNotSupported {
@@ -1016,12 +1039,12 @@ func TestSyncWithUpdateOlder(t *testing.T) {
 	fourO := r.WriteObject(ctx, "four", "FOURFOUR", t2minus)
 	fstest.CheckItems(t, r.Fremote, oneO, twoO, threeO, fourO)
-	fs.Config.UpdateOlder = true
-	oldModifyWindow := fs.Config.ModifyWindow
-	fs.Config.ModifyWindow = fs.ModTimeNotSupported
+	ci.UpdateOlder = true
+	oldModifyWindow := ci.ModifyWindow
+	ci.ModifyWindow = fs.ModTimeNotSupported
 	defer func() {
-		fs.Config.UpdateOlder = false
-		fs.Config.ModifyWindow = oldModifyWindow
+		ci.UpdateOlder = false
+		ci.ModifyWindow = oldModifyWindow
 	}()
 	err := Sync(ctx, r.Fremote, r.Flocal, false)
@@ -1034,8 +1057,8 @@ func TestSyncWithUpdateOlder(t *testing.T) {
 	}
 	// now enable checksum
-	fs.Config.CheckSum = true
-	defer func() { fs.Config.CheckSum = false }()
+	ci.CheckSum = true
+	defer func() { ci.CheckSum = false }()
 	err = Sync(ctx, r.Fremote, r.Flocal, false)
 	require.NoError(t, err)
@@ -1045,6 +1068,7 @@ func TestSyncWithUpdateOlder(t *testing.T) {
 // Test with a max transfer duration
 func TestSyncWithMaxDuration(t *testing.T) {
 	ctx := context.Background()
+	ci := fs.GetConfig(ctx)
 	if *fstest.RemoteName != "" {
 		t.Skip("Skipping test on non local remote")
 	}
@@ -1052,14 +1076,14 @@ func TestSyncWithMaxDuration(t *testing.T) {
 	defer r.Finalise()
 	maxDuration := 250 * time.Millisecond
-	fs.Config.MaxDuration = maxDuration
+	ci.MaxDuration = maxDuration
 	bytesPerSecond := 300
 	accounting.SetBwLimit(fs.SizeSuffix(bytesPerSecond))
-	oldTransfers := fs.Config.Transfers
-	fs.Config.Transfers = 1
+	oldTransfers := ci.Transfers
+	ci.Transfers = 1
 	defer func() {
-		fs.Config.MaxDuration = 0 // reset back to default
-		fs.Config.Transfers = oldTransfers
+		ci.MaxDuration = 0 // reset back to default
+		ci.Transfers = oldTransfers
 		accounting.SetBwLimit(fs.SizeSuffix(0))
 	}()
@@ -1089,12 +1113,13 @@ func TestSyncWithMaxDuration(t *testing.T) {
 // Test with TrackRenames set
 func TestSyncWithTrackRenames(t *testing.T) {
 	ctx := context.Background()
+	ci := fs.GetConfig(ctx)
 	r := fstest.NewRun(t)
 	defer r.Finalise()
-	fs.Config.TrackRenames = true
+	ci.TrackRenames = true
 	defer func() {
-		fs.Config.TrackRenames = false
+		ci.TrackRenames = false
 	}()
 	haveHash := r.Fremote.Hashes().Overlap(r.Flocal.Hashes()).GetOne() != hash.None
@@ -1160,14 +1185,15 @@ func TestRenamesStrategyModtime(t *testing.T) {
 func TestSyncWithTrackRenamesStrategyModtime(t *testing.T) {
 	ctx := context.Background()
+	ci := fs.GetConfig(ctx)
 	r := fstest.NewRun(t)
 	defer r.Finalise()
-	fs.Config.TrackRenames = true
-	fs.Config.TrackRenamesStrategy = "modtime"
+	ci.TrackRenames = true
+	ci.TrackRenamesStrategy = "modtime"
 	defer func() {
-		fs.Config.TrackRenames = false
-		fs.Config.TrackRenamesStrategy = "hash"
+		ci.TrackRenames = false
+		ci.TrackRenamesStrategy = "hash"
 	}()
 	canTrackRenames := operations.CanServerSideMove(r.Fremote) && r.Fremote.Precision() != fs.ModTimeNotSupported
@@ -1199,14 +1225,15 @@ func TestSyncWithTrackRenamesStrategyModtime(t *testing.T) {
 func TestSyncWithTrackRenamesStrategyLeaf(t *testing.T) {
 	ctx := context.Background()
+	ci := fs.GetConfig(ctx)
 	r := fstest.NewRun(t)
 	defer r.Finalise()
-	fs.Config.TrackRenames = true
-	fs.Config.TrackRenamesStrategy = "leaf"
+	ci.TrackRenames = true
+	ci.TrackRenamesStrategy = "leaf"
 	defer func() {
-		fs.Config.TrackRenames = false
-		fs.Config.TrackRenamesStrategy = "hash"
+		ci.TrackRenames = false
+		ci.TrackRenamesStrategy = "hash"
 	}()
 	canTrackRenames := operations.CanServerSideMove(r.Fremote) && r.Fremote.Precision() != fs.ModTimeNotSupported
@@ -1445,12 +1472,13 @@ func TestSyncOverlap(t *testing.T) {
 // Test with CompareDest set
 func TestSyncCompareDest(t *testing.T) {
 	ctx := context.Background()
+	ci := fs.GetConfig(ctx)
 	r := fstest.NewRun(t)
 	defer r.Finalise()
-	fs.Config.CompareDest = r.FremoteName + "/CompareDest"
+	ci.CompareDest = r.FremoteName + "/CompareDest"
 	defer func() {
-		fs.Config.CompareDest = ""
+		ci.CompareDest = ""
 	}()
 	fdst, err := fs.NewFs(ctx, r.FremoteName+"/dst")
@@ -1533,6 +1561,7 @@ func TestSyncCompareDest(t *testing.T) {
 // Test with CopyDest set
 func TestSyncCopyDest(t *testing.T) {
 	ctx := context.Background()
+	ci := fs.GetConfig(ctx)
 	r := fstest.NewRun(t)
 	defer r.Finalise()
@@ -1540,9 +1569,9 @@ func TestSyncCopyDest(t *testing.T) {
 		t.Skip("Skipping test as remote does not support server-side copy")
 	}
-	fs.Config.CopyDest = r.FremoteName + "/CopyDest"
+	ci.CopyDest = r.FremoteName + "/CopyDest"
 	defer func() {
-		fs.Config.CopyDest = ""
+		ci.CopyDest = ""
 	}()
 	fdst, err := fs.NewFs(ctx, r.FremoteName+"/dst")
@@ -1577,7 +1606,7 @@ func TestSyncCopyDest(t *testing.T) {
 	// check old dest, new copy, backup-dir
-	fs.Config.BackupDir = r.FremoteName + "/BackupDir"
+	ci.BackupDir = r.FremoteName + "/BackupDir"
 	file3 := r.WriteObject(ctx, "dst/one", "one", t1)
 	file2 := r.WriteObject(ctx, "CopyDest/one", "onet2", t2)
@@ -1594,7 +1623,7 @@ func TestSyncCopyDest(t *testing.T) {
 	file3.Path = "BackupDir/one"
 	fstest.CheckItems(t, r.Fremote, file2, file2dst, file3)
-	fs.Config.BackupDir = ""
+	ci.BackupDir = ""
 	// check empty dest, new copy
 	file4 := r.WriteObject(ctx, "CopyDest/two", "two", t2)
@@ -1637,6 +1666,7 @@ func TestSyncCopyDest(t *testing.T) {
 // Test with BackupDir set
 func testSyncBackupDir(t *testing.T, backupDir string, suffix string, suffixKeepExtension bool) {
 	ctx := context.Background()
+	ci := fs.GetConfig(ctx)
 	r := fstest.NewRun(t)
 	defer r.Finalise()
@@ -1646,10 +1676,10 @@ func testSyncBackupDir(t *testing.T, backupDir string, suffix string, suffixKeep
 	r.Mkdir(ctx, r.Fremote)
 	if backupDir != "" {
-		fs.Config.BackupDir = r.FremoteName + "/" + backupDir
+		ci.BackupDir = r.FremoteName + "/" + backupDir
 		backupDir += "/"
 	} else {
-		fs.Config.BackupDir = ""
+		ci.BackupDir = ""
 		backupDir = "dst/"
 		// Exclude the suffix from the sync otherwise the sync
 		// deletes the old backup files
@@ -1662,12 +1692,12 @@ func testSyncBackupDir(t *testing.T, backupDir string, suffix string, suffixKeep
 			filter.Active = oldFlt
 		}()
 	}
-	fs.Config.Suffix = suffix
-	fs.Config.SuffixKeepExtension = suffixKeepExtension
+	ci.Suffix = suffix
+	ci.SuffixKeepExtension = suffixKeepExtension
 	defer func() {
-		fs.Config.BackupDir = ""
-		fs.Config.Suffix = ""
-		fs.Config.SuffixKeepExtension = false
+		ci.BackupDir = ""
+		ci.Suffix = ""
+		ci.SuffixKeepExtension = false
 	}()
 	// Make the setup so we have one, two, three in the dest
@@ -1742,6 +1772,7 @@ func TestSyncBackupDirSuffixOnly(t *testing.T) {
 // Test with Suffix set
 func testSyncSuffix(t *testing.T, suffix string, suffixKeepExtension bool) {
 	ctx := context.Background()
+	ci := fs.GetConfig(ctx)
 	r := fstest.NewRun(t)
 	defer r.Finalise()
@@ -1750,12 +1781,12 @@ func testSyncSuffix(t *testing.T, suffix string, suffixKeepExtension bool) {
 	}
 	r.Mkdir(ctx, r.Fremote)
-	fs.Config.Suffix = suffix
-	fs.Config.SuffixKeepExtension = suffixKeepExtension
+	ci.Suffix = suffix
+	ci.SuffixKeepExtension = suffixKeepExtension
 	defer func() {
-		fs.Config.BackupDir = ""
-		fs.Config.Suffix = ""
-		fs.Config.SuffixKeepExtension = false
+		ci.BackupDir = ""
+		ci.Suffix = ""
+		ci.SuffixKeepExtension = false
 	}()
 	// Make the setup so we have one, two, three in the dest
@@ -1865,11 +1896,12 @@ func TestSyncUTFNorm(t *testing.T) {
 // Test --immutable
 func TestSyncImmutable(t *testing.T) {
 	ctx := context.Background()
+	ci := fs.GetConfig(ctx)
 	r := fstest.NewRun(t)
 	defer r.Finalise()
-	fs.Config.Immutable = true
-	defer func() { fs.Config.Immutable = false }()
+	ci.Immutable = true
+	defer func() { ci.Immutable = false }()
 	// Create file on source
 	file1 := r.WriteFile("existing", "potato", t1)
@@ -1899,6 +1931,7 @@ func TestSyncImmutable(t *testing.T) {
 // Test --ignore-case-sync
 func TestSyncIgnoreCase(t *testing.T) {
 	ctx := context.Background()
+	ci := fs.GetConfig(ctx)
 	r := fstest.NewRun(t)
 	defer r.Finalise()
@@ -1907,8 +1940,8 @@ func TestSyncIgnoreCase(t *testing.T) {
 		t.Skip("Skipping test as local or remote are case-insensitive")
 	}
-	fs.Config.IgnoreCaseSync = true
-	defer func() { fs.Config.IgnoreCaseSync = false }()
+	ci.IgnoreCaseSync = true
+	defer func() { ci.IgnoreCaseSync = false }()
 	// Create files with different filename casing
 	file1 := r.WriteFile("existing", "potato", t1)
@@ -1927,25 +1960,26 @@ func TestSyncIgnoreCase(t *testing.T) {
 // Test that aborting on --max-transfer works
 func TestMaxTransfer(t *testing.T) {
 	ctx := context.Background()
-	oldMaxTransfer := fs.Config.MaxTransfer
-	oldTransfers := fs.Config.Transfers
-	oldCheckers := fs.Config.Checkers
-	oldCutoff := fs.Config.CutoffMode
-	fs.Config.MaxTransfer = 3 * 1024
-	fs.Config.Transfers = 1
-	fs.Config.Checkers = 1
-	fs.Config.CutoffMode = fs.CutoffModeHard
+	ci := fs.GetConfig(ctx)
+	oldMaxTransfer := ci.MaxTransfer
+	oldTransfers := ci.Transfers
+	oldCheckers := ci.Checkers
+	oldCutoff := ci.CutoffMode
+	ci.MaxTransfer = 3 * 1024
+	ci.Transfers = 1
+	ci.Checkers = 1
+	ci.CutoffMode = fs.CutoffModeHard
 	defer func() {
-		fs.Config.MaxTransfer = oldMaxTransfer
-		fs.Config.Transfers = oldTransfers
-		fs.Config.Checkers = oldCheckers
-		fs.Config.CutoffMode = oldCutoff
+		ci.MaxTransfer = oldMaxTransfer
+		ci.Transfers = oldTransfers
+		ci.Checkers = oldCheckers
+		ci.CutoffMode = oldCutoff
	}()
 	test := func(t *testing.T, cutoff fs.CutoffMode) {
 		r := fstest.NewRun(t)
 		defer r.Finalise()
-		fs.Config.CutoffMode = cutoff
+		ci.CutoffMode = cutoff
 		if r.Fremote.Name() != "local" {
 			t.Skip("This test only runs on local")

View File

@@ -59,11 +59,12 @@ type Func func(path string, entries fs.DirEntries, err error) error
 //
 // NB (f, path) to be replaced by fs.Dir at some point
 func Walk(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel int, fn Func) error {
-	if fs.Config.NoTraverse && filter.Active.HaveFilesFrom() {
+	ci := fs.GetConfig(ctx)
+	if ci.NoTraverse && filter.Active.HaveFilesFrom() {
 		return walkR(ctx, f, path, includeAll, maxLevel, fn, filter.Active.MakeListR(ctx, f.NewObject))
 	}
 	// FIXME should this just be maxLevel < 0 - why the maxLevel > 1
-	if (maxLevel < 0 || maxLevel > 1) && fs.Config.UseListR && f.Features().ListR != nil {
+	if (maxLevel < 0 || maxLevel > 1) && ci.UseListR && f.Features().ListR != nil {
 		return walkListR(ctx, f, path, includeAll, maxLevel, fn)
 	}
 	return walkListDirSorted(ctx, f, path, includeAll, maxLevel, fn)
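Because Walk now reads its settings from the context, a caller can scope an override to a single call rather than flipping a global. A hedged usage sketch, assuming fs.AddConfig returns the derived context together with the mutable copy the commit message describes (f, fn and ctx are taken from the surrounding code):

	// Force ListR for this walk only; other operations keep their settings.
	newCtx, ci := fs.AddConfig(ctx)
	ci.UseListR = true
	err := walk.Walk(newCtx, f, "", false, -1, fn)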
@@ -353,10 +354,11 @@ type listDirFunc func(ctx context.Context, fs fs.Fs, includeAll bool, dir string
 func walk(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel int, fn Func, listDir listDirFunc) error {
 	var (
 		wg         sync.WaitGroup // sync closing of go routines
 		traversing sync.WaitGroup // running directory traversals
 		doClose    sync.Once      // close the channel once
 		mu         sync.Mutex     // stop fn being called concurrently
+		ci         = fs.GetConfig(ctx) // current config
 	)
 	// listJob describes a directory listing that needs to be done
 	type listJob struct {
@@ -364,7 +366,7 @@ func walk(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel i
 		depth int
 	}
-	in := make(chan listJob, fs.Config.Checkers)
+	in := make(chan listJob, ci.Checkers)
 	errs := make(chan error, 1)
 	quit := make(chan struct{})
 	closeQuit := func() {
@@ -377,7 +379,7 @@ func walk(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel i
 			}()
 		})
 	}
-	for i := 0; i < fs.Config.Checkers; i++ {
+	for i := 0; i < ci.Checkers; i++ {
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
@@ -553,8 +555,9 @@ func walkNDirTree(ctx context.Context, f fs.Fs, path string, includeAll bool, ma
 //
 // NB (f, path) to be replaced by fs.Dir at some point
 func NewDirTree(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel int) (dirtree.DirTree, error) {
+	ci := fs.GetConfig(ctx)
 	// if --no-traverse and --files-from build DirTree just from files
-	if fs.Config.NoTraverse && filter.Active.HaveFilesFrom() {
+	if ci.NoTraverse && filter.Active.HaveFilesFrom() {
 		return walkRDirTree(ctx, f, path, includeAll, maxLevel, filter.Active.MakeListR(ctx, f.NewObject))
 	}
 	// if have ListR; and recursing; and not using --files-from; then build a DirTree with ListR

View File

@@ -59,10 +59,11 @@ func init() {
 // Initialise rclone for testing
 func Initialise() {
 	ctx := context.Background()
+	ci := fs.GetConfig(ctx)
 	// Never ask for passwords, fail instead.
 	// If your local config is encrypted set environment variable
 	// "RCLONE_CONFIG_PASS=hunter2" (or your password)
-	fs.Config.AskPassword = false
+	ci.AskPassword = false
 	// Override the config file from the environment - we don't
 	// parse the flags any more so this doesn't happen
 	// automatically
@@ -71,16 +72,16 @@ func Initialise() {
 	}
 	config.LoadConfig(ctx)
 	if *Verbose {
-		fs.Config.LogLevel = fs.LogLevelDebug
+		ci.LogLevel = fs.LogLevelDebug
 	}
 	if *DumpHeaders {
-		fs.Config.Dump |= fs.DumpHeaders
+		ci.Dump |= fs.DumpHeaders
 	}
 	if *DumpBodies {
-		fs.Config.Dump |= fs.DumpBodies
+		ci.Dump |= fs.DumpBodies
 	}
-	fs.Config.LowLevelRetries = *LowLevelRetries
-	fs.Config.UseListR = *UseListR
+	ci.LowLevelRetries = *LowLevelRetries
+	ci.UseListR = *UseListR
 }
 // Item represents an item for checking
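Initialise mutates the config obtained from fs.GetConfig(context.Background()); for that to configure the whole test run, every later lookup on an underived context must observe the same struct. A short illustration of the aliasing this relies on, assuming GetConfig returns a pointer to the shared default in that case:

	a := fs.GetConfig(context.Background())
	b := fs.GetConfig(context.Background())
	a.LowLevelRetries = 5
	fmt.Println(b.LowLevelRetries) // prints 5: a and b alias one ConfigInfo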

View File

@@ -295,6 +295,7 @@ func Run(t *testing.T, opt *Opt) {
 		isLocalRemote        bool
 		purged               bool // whether the dir has been purged or not
 		ctx                  = context.Background()
+		ci                   = fs.GetConfig(ctx)
 		unwrappableFsMethods = []string{"Command"} // these Fs methods don't need to be wrapped ever
 	)
@@ -316,10 +317,10 @@ func Run(t *testing.T, opt *Opt) {
 		if remote.Features().ListR == nil {
 			t.Skip("FS has no ListR interface")
 		}
-		previous := fs.Config.UseListR
-		fs.Config.UseListR = true
+		previous := ci.UseListR
+		ci.UseListR = true
 		return func() {
-			fs.Config.UseListR = previous
+			ci.UseListR = previous
 		}
 	}

View File

@@ -4,6 +4,7 @@ package main
 import (
 	"bytes"
+	"context"
 	"fmt"
 	"go/build"
 	"io"
@@ -345,9 +346,10 @@ func (r *Run) Init() {
 		r.CmdLine = append(r.CmdLine, "-list-retries", fmt.Sprint(listRetries))
 	}
 	r.Try = 1
+	ci := fs.GetConfig(context.Background())
 	if *verbose {
 		r.CmdLine = append(r.CmdLine, "-verbose")
-		fs.Config.LogLevel = fs.LogLevelDebug
+		ci.LogLevel = fs.LogLevelDebug
 	}
 	if *runOnly != "" {
 		r.CmdLine = append(r.CmdLine, prefix+"run", *runOnly)

View File

@@ -353,7 +353,7 @@ func NewClientWithBaseClient(ctx context.Context, name string, m configmap.Mappe
 // NewClient gets a token from the config file and configures a Client
 // with it. It returns the client and a TokenSource on which Invalidate may need to be called
 func NewClient(ctx context.Context, name string, m configmap.Mapper, oauthConfig *oauth2.Config) (*http.Client, *TokenSource, error) {
-	return NewClientWithBaseClient(ctx, name, m, oauthConfig, fshttp.NewClient(fs.Config))
+	return NewClientWithBaseClient(ctx, name, m, oauthConfig, fshttp.NewClient(fs.GetConfig(ctx)))
 }
 // AuthResult is returned from the web server after authorization
@@ -526,7 +526,7 @@ version recommended):
 	}
 	// Exchange the code for a token
-	ctx = Context(ctx, fshttp.NewClient(fs.Config))
+	ctx = Context(ctx, fshttp.NewClient(fs.GetConfig(ctx)))
 	token, err := oauthConfig.Exchange(ctx, auth.Code)
 	if err != nil {
 		return errors.Wrap(err, "failed to get token")
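NewClient now derives its base HTTP client from whatever config rides on the context, so per-context settings such as timeouts and the --dump flags reach OAuth traffic too. A typical backend call site, sketched from the signature above (name, m and oauthConfig would come from the backend's NewFs):

	oAuthClient, ts, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
	if err != nil {
		return nil, errors.Wrap(err, "failed to configure OAuth client")
	}
	_ = ts // keep the TokenSource if Invalidate may be needed later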

View File

@@ -276,6 +276,7 @@ func (fh *ReadFileHandle) readAt(p []byte, off int64) (n int, err error) {
 	retries := 0
 	reqSize := len(p)
 	doReopen := false
+	lowLevelRetries := fs.GetConfig(context.TODO()).LowLevelRetries
 	for {
 		if doSeek {
 			// Are we attempting to seek beyond the end of the
@@ -312,11 +313,11 @@ func (fh *ReadFileHandle) readAt(p []byte, off int64) (n int, err error) {
 				break
 			}
 		}
-		if retries >= fs.Config.LowLevelRetries {
+		if retries >= lowLevelRetries {
 			break
 		}
 		retries++
-		fs.Errorf(fh.remote, "ReadFileHandle.Read error: low level retry %d/%d: %v", retries, fs.Config.LowLevelRetries, err)
+		fs.Errorf(fh.remote, "ReadFileHandle.Read error: low level retry %d/%d: %v", retries, lowLevelRetries, err)
 		doSeek = true
 		doReopen = true
 	}
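The context.TODO() here flags a call site where no caller context has been plumbed through yet; once one is, the lookup can become fs.GetConfig(ctx) and pick up per-request settings. The surrounding loop reduces to a familiar pattern — a sketch with a hypothetical helper, not rclone code:

	// withRetries runs op up to LowLevelRetries+1 times, logging each retry.
	func withRetries(ctx context.Context, remote string, op func() error) (err error) {
		lowLevelRetries := fs.GetConfig(ctx).LowLevelRetries
		for retries := 0; ; retries++ {
			if err = op(); err == nil || retries >= lowLevelRetries {
				return err
			}
			fs.Errorf(remote, "low level retry %d/%d: %v", retries+1, lowLevelRetries, err)
		}
	}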

View File

@@ -95,7 +95,7 @@ func New(ctx context.Context, fremote fs.Fs, opt *vfscommon.Options, avFn AddVir
 		return nil, errors.Wrap(err, "failed to create cache meta remote")
 	}
-	hashType, hashOption := operations.CommonHash(fcache, fremote)
+	hashType, hashOption := operations.CommonHash(ctx, fcache, fremote)
 	c := &Cache{
 		fremote: fremote,

View File

@@ -283,7 +283,7 @@ func (dls *Downloaders) _ensureDownloader(r ranges.Range) (err error) {
 	// defer log.Trace(dls.src, "r=%v", r)("err=%v", &err)
 	// The window includes potentially unread data in the buffer
-	window := int64(fs.Config.BufferSize)
+	window := int64(fs.GetConfig(context.TODO()).BufferSize)
 	// Increase the read range by the read ahead if set
 	if dls.opt.ReadAhead > 0 {
@@ -521,7 +521,7 @@ func (dl *downloader) open(offset int64) (err error) {
 	// if offset > 0 {
 	//	rangeOption = &fs.RangeOption{Start: offset, End: size - 1}
 	// }
-	// in0, err := operations.NewReOpen(dl.dls.ctx, dl.dls.src, fs.Config.LowLevelRetries, dl.dls.item.c.hashOption, rangeOption)
+	// in0, err := operations.NewReOpen(dl.dls.ctx, dl.dls.src, ci.LowLevelRetries, dl.dls.item.c.hashOption, rangeOption)
 	in0 := chunkedreader.New(context.TODO(), dl.dls.src, int64(dl.dls.opt.ChunkSize), int64(dl.dls.opt.ChunkSizeLimit))
 	_, err = in0.Seek(offset, 0)

View File

@@ -491,7 +491,7 @@ func (item *Item) _createFile(osPath string) (err error) {
 // Open the local file from the object passed in. Wraps open()
 // to provide recovery from out of space error.
 func (item *Item) Open(o fs.Object) (err error) {
-	for retries := 0; retries < fs.Config.LowLevelRetries; retries++ {
+	for retries := 0; retries < fs.GetConfig(context.TODO()).LowLevelRetries; retries++ {
 		item.preAccess()
 		err = item.open(o)
 		item.postAccess()
@@ -1190,7 +1190,7 @@ func (item *Item) setModTime(modTime time.Time) {
 func (item *Item) ReadAt(b []byte, off int64) (n int, err error) {
 	n = 0
 	var expBackOff int
-	for retries := 0; retries < fs.Config.LowLevelRetries; retries++ {
+	for retries := 0; retries < fs.GetConfig(context.TODO()).LowLevelRetries; retries++ {
 		item.preAccess()
 		n, err = item.readAt(b, off)
 		item.postAccess()

View File

@@ -416,7 +416,7 @@ func (wb *WriteBack) processItems(ctx context.Context) {
 	resetTimer := true
 	for wbItem := wb._peekItem(); wbItem != nil && time.Until(wbItem.expiry) <= 0; wbItem = wb._peekItem() {
 		// If reached transfer limit don't restart the timer
-		if wb.uploads >= fs.Config.Transfers {
+		if wb.uploads >= fs.GetConfig(context.TODO()).Transfers {
 			fs.Debugf(wbItem.name, "vfs cache: delaying writeback as --transfers exceeded")
 			resetTimer = false
 			break

View File

@@ -493,10 +493,12 @@ func TestWriteBackGetStats(t *testing.T) {
 // Test queuing more than fs.Config.Transfers
 func TestWriteBackMaxQueue(t *testing.T) {
+	ctx := context.Background()
+	ci := fs.GetConfig(ctx)
 	wb, cancel := newTestWriteBack(t)
 	defer cancel()
-	maxTransfers := fs.Config.Transfers
+	maxTransfers := ci.Transfers
 	toTransfer := maxTransfers + 2
 	// put toTransfer things in the queue