diff --git a/backend/alias/alias.go b/backend/alias/alias.go index e92135e99..3fcf6dc14 100644 --- a/backend/alias/alias.go +++ b/backend/alias/alias.go @@ -7,7 +7,8 @@ import ( "strings" "github.com/ncw/rclone/fs" - "github.com/ncw/rclone/fs/config" + "github.com/ncw/rclone/fs/config/configmap" + "github.com/ncw/rclone/fs/config/configstruct" ) // Register with Fs @@ -17,29 +18,42 @@ func init() { Description: "Alias for a existing remote", NewFs: NewFs, Options: []fs.Option{{ - Name: "remote", - Help: "Remote or path to alias.\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".", + Name: "remote", + Help: "Remote or path to alias.\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".", + Required: true, }}, } fs.Register(fsi) } +// Options defines the configuration for this backend +type Options struct { + Remote string `config:"remote"` +} + // NewFs contstructs an Fs from the path. // // The returned Fs is the actual Fs, referenced by remote in the config -func NewFs(name, root string) (fs.Fs, error) { - remote := config.FileGet(name, "remote") - if remote == "" { - return nil, errors.New("alias can't point to an empty remote - check the value of the remote setting") - } - if strings.HasPrefix(remote, name+":") { - return nil, errors.New("can't point alias remote at itself - check the value of the remote setting") - } - fsInfo, configName, fsPath, err := fs.ParseRemote(remote) +func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { + // Parse config into Options struct + opt := new(Options) + err := configstruct.Set(m, opt) if err != nil { return nil, err } - - root = filepath.ToSlash(root) - return fsInfo.NewFs(configName, path.Join(fsPath, root)) + if opt.Remote == "" { + return nil, errors.New("alias can't point to an empty remote - check the value of the remote setting") + } + if strings.HasPrefix(opt.Remote, name+":") { + return nil, errors.New("can't point alias remote at itself - check the value of the remote setting") + } + _, configName, fsPath, err := fs.ParseRemote(opt.Remote) + if err != nil { + return nil, err + } + root = path.Join(fsPath, filepath.ToSlash(root)) + if configName == "local" { + return fs.NewFs(root) + } + return fs.NewFs(configName + ":" + root) } diff --git a/backend/amazonclouddrive/amazonclouddrive.go b/backend/amazonclouddrive/amazonclouddrive.go index 35008430f..22201febf 100644 --- a/backend/amazonclouddrive/amazonclouddrive.go +++ b/backend/amazonclouddrive/amazonclouddrive.go @@ -24,7 +24,8 @@ import ( "github.com/ncw/go-acd" "github.com/ncw/rclone/fs" "github.com/ncw/rclone/fs/config" - "github.com/ncw/rclone/fs/config/flags" + "github.com/ncw/rclone/fs/config/configmap" + "github.com/ncw/rclone/fs/config/configstruct" "github.com/ncw/rclone/fs/fserrors" "github.com/ncw/rclone/fs/fshttp" "github.com/ncw/rclone/fs/hash" @@ -37,19 +38,17 @@ import ( ) const ( - folderKind = "FOLDER" - fileKind = "FILE" - statusAvailable = "AVAILABLE" - timeFormat = time.RFC3339 // 2014-03-07T22:31:12.173Z - minSleep = 20 * time.Millisecond - warnFileSize = 50000 << 20 // Display warning for files larger than this size + folderKind = "FOLDER" + fileKind = "FILE" + statusAvailable = "AVAILABLE" + timeFormat = time.RFC3339 // 2014-03-07T22:31:12.173Z + minSleep = 20 * time.Millisecond + warnFileSize = 50000 << 20 // Display warning for files larger than this size + defaultTempLinkThreshold = fs.SizeSuffix(9 << 30) // Download files bigger than this via the tempLink ) // Globals var ( - 
// Flags - tempLinkThreshold = fs.SizeSuffix(9 << 30) // Download files bigger than this via the tempLink - uploadWaitPerGB = flags.DurationP("acd-upload-wait-per-gb", "", 180*time.Second, "Additional time per GB to wait after a failed complete upload to see if it appears.") // Description of how to auth for this app acdConfig = &oauth2.Config{ Scopes: []string{"clouddrive:read_all", "clouddrive:write"}, @@ -67,35 +66,62 @@ var ( func init() { fs.Register(&fs.RegInfo{ Name: "amazon cloud drive", + Prefix: "acd", Description: "Amazon Drive", NewFs: NewFs, - Config: func(name string) { - err := oauthutil.Config("amazon cloud drive", name, acdConfig) + Config: func(name string, m configmap.Mapper) { + err := oauthutil.Config("amazon cloud drive", name, m, acdConfig) if err != nil { log.Fatalf("Failed to configure token: %v", err) } }, Options: []fs.Option{{ - Name: config.ConfigClientID, - Help: "Amazon Application Client Id - required.", + Name: config.ConfigClientID, + Help: "Amazon Application Client ID.", + Required: true, }, { - Name: config.ConfigClientSecret, - Help: "Amazon Application Client Secret - required.", + Name: config.ConfigClientSecret, + Help: "Amazon Application Client Secret.", + Required: true, }, { - Name: config.ConfigAuthURL, - Help: "Auth server URL - leave blank to use Amazon's.", + Name: config.ConfigAuthURL, + Help: "Auth server URL.\nLeave blank to use Amazon's.", + Advanced: true, }, { - Name: config.ConfigTokenURL, - Help: "Token server url - leave blank to use Amazon's.", + Name: config.ConfigTokenURL, + Help: "Token server url.\nleave blank to use Amazon's.", + Advanced: true, + }, { + Name: "checkpoint", + Help: "Checkpoint for internal polling (debug).", + Hide: fs.OptionHideBoth, + Advanced: true, + }, { + Name: "upload_wait_per_gb", + Help: "Additional time per GB to wait after a failed complete upload to see if it appears.", + Default: fs.Duration(180 * time.Second), + Advanced: true, + }, { + Name: "templink_threshold", + Help: "Files >= this size will be downloaded via their tempLink.", + Default: defaultTempLinkThreshold, + Advanced: true, }}, }) - flags.VarP(&tempLinkThreshold, "acd-templink-threshold", "", "Files >= this size will be downloaded via their tempLink.") +} + +// Options defines the configuration for this backend +type Options struct { + Checkpoint string `config:"checkpoint"` + UploadWaitPerGB fs.Duration `config:"upload_wait_per_gb"` + TempLinkThreshold fs.SizeSuffix `config:"templink_threshold"` } // Fs represents a remote acd server type Fs struct { name string // name of this remote features *fs.Features // optional features + opt Options // options for this Fs c *acd.Client // the connection to the acd server noAuthClient *http.Client // unauthenticated http client root string // the path we are working on @@ -191,7 +217,13 @@ func filterRequest(req *http.Request) { } // NewFs constructs an Fs from the path, container:path -func NewFs(name, root string) (fs.Fs, error) { +func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { + // Parse config into Options struct + opt := new(Options) + err := configstruct.Set(m, opt) + if err != nil { + return nil, err + } root = parsePath(root) baseClient := fshttp.NewClient(fs.Config) if do, ok := baseClient.Transport.(interface { @@ -201,7 +233,7 @@ func NewFs(name, root string) (fs.Fs, error) { } else { fs.Debugf(name+":", "Couldn't add request filter - large file downloads will fail") } - oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, acdConfig, baseClient) + 
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, acdConfig, baseClient) if err != nil { log.Fatalf("Failed to configure Amazon Drive: %v", err) } @@ -210,6 +242,7 @@ func NewFs(name, root string) (fs.Fs, error) { f := &Fs{ name: name, root: root, + opt: *opt, c: c, pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.AmazonCloudDrivePacer), noAuthClient: fshttp.NewClient(fs.Config), @@ -527,13 +560,13 @@ func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, i } // Don't wait for uploads - assume they will appear later - if *uploadWaitPerGB <= 0 { + if f.opt.UploadWaitPerGB <= 0 { fs.Debugf(src, "Upload error detected but waiting disabled: %v (%q)", inErr, httpStatus) return false, inInfo, inErr } // Time we should wait for the upload - uploadWaitPerByte := float64(*uploadWaitPerGB) / 1024 / 1024 / 1024 + uploadWaitPerByte := float64(f.opt.UploadWaitPerGB) / 1024 / 1024 / 1024 timeToWait := time.Duration(uploadWaitPerByte * float64(src.Size())) const sleepTime = 5 * time.Second // sleep between tries @@ -1015,7 +1048,7 @@ func (o *Object) Storable() bool { // Open an object for read func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { - bigObject := o.Size() >= int64(tempLinkThreshold) + bigObject := o.Size() >= int64(o.fs.opt.TempLinkThreshold) if bigObject { fs.Debugf(o, "Downloading large object via tempLink") } @@ -1208,7 +1241,7 @@ func (o *Object) MimeType() string { // // Close the returned channel to stop being notified. func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollInterval time.Duration) chan bool { - checkpoint := config.FileGet(f.name, "checkpoint") + checkpoint := f.opt.Checkpoint quit := make(chan bool) go func() { diff --git a/backend/azureblob/azureblob.go b/backend/azureblob/azureblob.go index ca2faaa8f..bc5b721a9 100644 --- a/backend/azureblob/azureblob.go +++ b/backend/azureblob/azureblob.go @@ -24,8 +24,8 @@ import ( "github.com/Azure/azure-storage-blob-go/2018-03-28/azblob" "github.com/ncw/rclone/fs" "github.com/ncw/rclone/fs/accounting" - "github.com/ncw/rclone/fs/config" - "github.com/ncw/rclone/fs/config/flags" + "github.com/ncw/rclone/fs/config/configmap" + "github.com/ncw/rclone/fs/config/configstruct" "github.com/ncw/rclone/fs/fserrors" "github.com/ncw/rclone/fs/hash" "github.com/ncw/rclone/fs/walk" @@ -44,14 +44,10 @@ const ( maxTotalParts = 50000 // in multipart upload storageDefaultBaseURL = "blob.core.windows.net" // maxUncommittedSize = 9 << 30 // can't upload bigger than this -) - -// Globals -var ( - maxChunkSize = fs.SizeSuffix(100 * 1024 * 1024) - chunkSize = fs.SizeSuffix(4 * 1024 * 1024) - uploadCutoff = fs.SizeSuffix(256 * 1024 * 1024) - maxUploadCutoff = fs.SizeSuffix(256 * 1024 * 1024) + defaultChunkSize = 4 * 1024 * 1024 + maxChunkSize = 100 * 1024 * 1024 + defaultUploadCutoff = 256 * 1024 * 1024 + maxUploadCutoff = 256 * 1024 * 1024 ) // Register with Fs @@ -70,22 +66,39 @@ func init() { Name: "sas_url", Help: "SAS URL for container level access only\n(leave blank if using account/key or connection string)", }, { - Name: "endpoint", - Help: "Endpoint for the service - leave blank normally.", - }, - }, + Name: "endpoint", + Help: "Endpoint for the service\nLeave blank normally.", + Advanced: true, + }, { + Name: "upload_cutoff", + Help: "Cutoff for switching to chunked upload.", + Default: fs.SizeSuffix(defaultUploadCutoff), + Advanced: true, + }, { + Name: "chunk_size", + Help: "Upload chunk size. 
Must fit in memory.", + Default: fs.SizeSuffix(defaultChunkSize), + Advanced: true, + }}, }) - flags.VarP(&uploadCutoff, "azureblob-upload-cutoff", "", "Cutoff for switching to chunked upload") - flags.VarP(&chunkSize, "azureblob-chunk-size", "", "Upload chunk size. Must fit in memory.") +} + +// Options defines the configuration for this backend +type Options struct { + Account string `config:"account"` + Key string `config:"key"` + Endpoint string `config:"endpoint"` + SASURL string `config:"sas_url"` + UploadCutoff fs.SizeSuffix `config:"upload_cutoff"` + ChunkSize fs.SizeSuffix `config:"chunk_size"` } // Fs represents a remote azure server type Fs struct { name string // name of this remote root string // the path we are working on if any + opt Options // parsed config options features *fs.Features // optional features - account string // account name - endpoint string // name of the starting api endpoint svcURL *azblob.ServiceURL // reference to serviceURL cntURL *azblob.ContainerURL // reference to containerURL container string // the container we are working on @@ -177,21 +190,27 @@ func (f *Fs) shouldRetry(err error) (bool, error) { } // NewFs contstructs an Fs from the path, container:path -func NewFs(name, root string) (fs.Fs, error) { - if uploadCutoff > maxUploadCutoff { - return nil, errors.Errorf("azure: upload cutoff (%v) must be less than or equal to %v", uploadCutoff, maxUploadCutoff) +func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { + // Parse config into Options struct + opt := new(Options) + err := configstruct.Set(m, opt) + if err != nil { + return nil, err } - if chunkSize > maxChunkSize { - return nil, errors.Errorf("azure: chunk size can't be greater than %v - was %v", maxChunkSize, chunkSize) + + if opt.UploadCutoff > maxUploadCutoff { + return nil, errors.Errorf("azure: upload cutoff (%v) must be less than or equal to %v", opt.UploadCutoff, maxUploadCutoff) + } + if opt.ChunkSize > maxChunkSize { + return nil, errors.Errorf("azure: chunk size can't be greater than %v - was %v", maxChunkSize, opt.ChunkSize) } container, directory, err := parsePath(root) if err != nil { return nil, err } - account := config.FileGet(name, "account") - key := config.FileGet(name, "key") - sasURL := config.FileGet(name, "sas_url") - endpoint := config.FileGet(name, "endpoint", storageDefaultBaseURL) + if opt.Endpoint == "" { + opt.Endpoint = storageDefaultBaseURL + } var ( u *url.URL @@ -199,17 +218,17 @@ func NewFs(name, root string) (fs.Fs, error) { containerURL azblob.ContainerURL ) switch { - case account != "" && key != "": - credential := azblob.NewSharedKeyCredential(account, key) - u, err = url.Parse(fmt.Sprintf("https://%s.%s", account, endpoint)) + case opt.Account != "" && opt.Key != "": + credential := azblob.NewSharedKeyCredential(opt.Account, opt.Key) + u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint)) if err != nil { return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint") } pipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{}) serviceURL = azblob.NewServiceURL(*u, pipeline) containerURL = serviceURL.NewContainerURL(container) - case sasURL != "": - u, err = url.Parse(sasURL) + case opt.SASURL != "": + u, err = url.Parse(opt.SASURL) if err != nil { return nil, errors.Wrapf(err, "failed to parse SAS URL") } @@ -234,10 +253,9 @@ func NewFs(name, root string) (fs.Fs, error) { f := &Fs{ name: name, + opt: *opt, container: container, root: directory, - account: account, - endpoint: 
endpoint, svcURL: &serviceURL, cntURL: &containerURL, pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant), @@ -990,7 +1008,7 @@ type readSeeker struct { // Write a larger blob, using CreateBlockBlob, PutBlock, and PutBlockList. func (o *Object) uploadMultipart(in io.Reader, size int64, blob *azblob.BlobURL, httpHeaders *azblob.BlobHTTPHeaders) (err error) { // Calculate correct chunkSize - chunkSize := int64(chunkSize) + chunkSize := int64(o.fs.opt.ChunkSize) var totalParts int64 for { // Calculate number of parts @@ -1147,7 +1165,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio httpHeaders.ContentType = fs.MimeType(o) // Multipart upload doesn't support MD5 checksums at put block calls, hence calculate // MD5 only for PutBlob requests - if size < int64(uploadCutoff) { + if size < int64(o.fs.opt.UploadCutoff) { if sourceMD5, _ := src.Hash(hash.MD5); sourceMD5 != "" { sourceMD5bytes, err := hex.DecodeString(sourceMD5) if err == nil { @@ -1159,7 +1177,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio } putBlobOptions := azblob.UploadStreamToBlockBlobOptions{ - BufferSize: int(chunkSize), + BufferSize: int(o.fs.opt.ChunkSize), MaxBuffers: 4, Metadata: o.meta, BlobHTTPHeaders: httpHeaders, @@ -1168,7 +1186,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio ctx := context.Background() // Don't retry, return a retry error instead err = o.fs.pacer.CallNoRetry(func() (bool, error) { - if size >= int64(uploadCutoff) { + if size >= int64(o.fs.opt.UploadCutoff) { // If a large file upload in chunks err = o.uploadMultipart(in, size, &blob, &httpHeaders) } else { diff --git a/backend/b2/b2.go b/backend/b2/b2.go index a876d5083..4e2a70c98 100644 --- a/backend/b2/b2.go +++ b/backend/b2/b2.go @@ -22,8 +22,8 @@ import ( "github.com/ncw/rclone/backend/b2/api" "github.com/ncw/rclone/fs" "github.com/ncw/rclone/fs/accounting" - "github.com/ncw/rclone/fs/config" - "github.com/ncw/rclone/fs/config/flags" + "github.com/ncw/rclone/fs/config/configmap" + "github.com/ncw/rclone/fs/config/configstruct" "github.com/ncw/rclone/fs/fserrors" "github.com/ncw/rclone/fs/fshttp" "github.com/ncw/rclone/fs/hash" @@ -34,30 +34,27 @@ import ( ) const ( - defaultEndpoint = "https://api.backblazeb2.com" - headerPrefix = "x-bz-info-" // lower case as that is what the server returns - timeKey = "src_last_modified_millis" - timeHeader = headerPrefix + timeKey - sha1Key = "large_file_sha1" - sha1Header = "X-Bz-Content-Sha1" - sha1InfoHeader = headerPrefix + sha1Key - testModeHeader = "X-Bz-Test-Mode" - retryAfterHeader = "Retry-After" - minSleep = 10 * time.Millisecond - maxSleep = 5 * time.Minute - decayConstant = 1 // bigger for slower decay, exponential - maxParts = 10000 - maxVersions = 100 // maximum number of versions we search in --b2-versions mode + defaultEndpoint = "https://api.backblazeb2.com" + headerPrefix = "x-bz-info-" // lower case as that is what the server returns + timeKey = "src_last_modified_millis" + timeHeader = headerPrefix + timeKey + sha1Key = "large_file_sha1" + sha1Header = "X-Bz-Content-Sha1" + sha1InfoHeader = headerPrefix + sha1Key + testModeHeader = "X-Bz-Test-Mode" + retryAfterHeader = "Retry-After" + minSleep = 10 * time.Millisecond + maxSleep = 5 * time.Minute + decayConstant = 1 // bigger for slower decay, exponential + maxParts = 10000 + maxVersions = 100 // maximum number of versions we search in --b2-versions mode + minChunkSize = 5E6 + 
defaultChunkSize = 96 * 1024 * 1024 + defaultUploadCutoff = 200E6 ) // Globals var ( - minChunkSize = fs.SizeSuffix(5E6) - chunkSize = fs.SizeSuffix(96 * 1024 * 1024) - uploadCutoff = fs.SizeSuffix(200E6) - b2TestMode = flags.StringP("b2-test-mode", "", "", "A flag string for X-Bz-Test-Mode header.") - b2Versions = flags.BoolP("b2-versions", "", false, "Include old versions in directory listings.") - b2HardDelete = flags.BoolP("b2-hard-delete", "", false, "Permanently delete files on remote removal, otherwise hide files.") errNotWithVersions = errors.New("can't modify or delete files in --b2-versions mode") ) @@ -68,29 +65,64 @@ func init() { Description: "Backblaze B2", NewFs: NewFs, Options: []fs.Option{{ - Name: "account", - Help: "Account ID", + Name: "account", + Help: "Account ID", + Required: true, }, { - Name: "key", - Help: "Application Key", + Name: "key", + Help: "Application Key", + Required: true, }, { - Name: "endpoint", - Help: "Endpoint for the service - leave blank normally.", - }, - }, + Name: "endpoint", + Help: "Endpoint for the service.\nLeave blank normally.", + Advanced: true, + }, { + Name: "test_mode", + Help: "A flag string for X-Bz-Test-Mode header for debugging.", + Default: "", + Hide: fs.OptionHideConfigurator, + Advanced: true, + }, { + Name: "versions", + Help: "Include old versions in directory listings.", + Default: false, + Advanced: true, + }, { + Name: "hard_delete", + Help: "Permanently delete files on remote removal, otherwise hide files.", + Default: false, + }, { + Name: "upload_cutoff", + Help: "Cutoff for switching to chunked upload.", + Default: fs.SizeSuffix(defaultUploadCutoff), + Advanced: true, + }, { + Name: "chunk_size", + Help: "Upload chunk size. Must fit in memory.", + Default: fs.SizeSuffix(defaultChunkSize), + Advanced: true, + }}, }) - flags.VarP(&uploadCutoff, "b2-upload-cutoff", "", "Cutoff for switching to chunked upload") - flags.VarP(&chunkSize, "b2-chunk-size", "", "Upload chunk size. 
Must fit in memory.") +} + +// Options defines the configuration for this backend +type Options struct { + Account string `config:"account"` + Key string `config:"key"` + Endpoint string `config:"endpoint"` + TestMode string `config:"test_mode"` + Versions bool `config:"versions"` + HardDelete bool `config:"hard_delete"` + UploadCutoff fs.SizeSuffix `config:"upload_cutoff"` + ChunkSize fs.SizeSuffix `config:"chunk_size"` } // Fs represents a remote b2 server type Fs struct { name string // name of this remote root string // the path we are working on if any + opt Options // parsed config options features *fs.Features // optional features - account string // account name - key string // auth key - endpoint string // name of the starting api endpoint srv *rest.Client // the connection to the b2 server bucket string // the bucket we are working on bucketOKMu sync.Mutex // mutex to protect bucket OK @@ -232,33 +264,37 @@ func errorHandler(resp *http.Response) error { } // NewFs contstructs an Fs from the path, bucket:path -func NewFs(name, root string) (fs.Fs, error) { - if uploadCutoff < chunkSize { - return nil, errors.Errorf("b2: upload cutoff (%v) must be greater than or equal to chunk size (%v)", uploadCutoff, chunkSize) +func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { + // Parse config into Options struct + opt := new(Options) + err := configstruct.Set(m, opt) + if err != nil { + return nil, err } - if chunkSize < minChunkSize { - return nil, errors.Errorf("b2: chunk size can't be less than %v - was %v", minChunkSize, chunkSize) + if opt.UploadCutoff < opt.ChunkSize { + return nil, errors.Errorf("b2: upload cutoff (%v) must be greater than or equal to chunk size (%v)", opt.UploadCutoff, opt.ChunkSize) + } + if opt.ChunkSize < minChunkSize { + return nil, errors.Errorf("b2: chunk size can't be less than %v - was %v", minChunkSize, opt.ChunkSize) } bucket, directory, err := parsePath(root) if err != nil { return nil, err } - account := config.FileGet(name, "account") - if account == "" { + if opt.Account == "" { return nil, errors.New("account not found") } - key := config.FileGet(name, "key") - if key == "" { + if opt.Key == "" { return nil, errors.New("key not found") } - endpoint := config.FileGet(name, "endpoint", defaultEndpoint) + if opt.Endpoint == "" { + opt.Endpoint = defaultEndpoint + } f := &Fs{ name: name, + opt: *opt, bucket: bucket, root: directory, - account: account, - key: key, - endpoint: endpoint, srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler), pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant), bufferTokens: make(chan []byte, fs.Config.Transfers), @@ -269,8 +305,8 @@ func NewFs(name, root string) (fs.Fs, error) { BucketBased: true, }).Fill(f) // Set the test flag if required - if *b2TestMode != "" { - testMode := strings.TrimSpace(*b2TestMode) + if opt.TestMode != "" { + testMode := strings.TrimSpace(opt.TestMode) f.srv.SetHeader(testModeHeader, testMode) fs.Debugf(f, "Setting test header \"%s: %s\"", testModeHeader, testMode) } @@ -316,9 +352,9 @@ func (f *Fs) authorizeAccount() error { opts := rest.Opts{ Method: "GET", Path: "/b2api/v1/b2_authorize_account", - RootURL: f.endpoint, - UserName: f.account, - Password: f.key, + RootURL: f.opt.Endpoint, + UserName: f.opt.Account, + Password: f.opt.Key, ExtraHeaders: map[string]string{"Authorization": ""}, // unset the Authorization for this request } err := f.pacer.Call(func() (bool, error) { @@ -384,7 +420,7 @@ func (f *Fs) 
clearUploadURL() { func (f *Fs) getUploadBlock() []byte { buf := <-f.bufferTokens if buf == nil { - buf = make([]byte, chunkSize) + buf = make([]byte, f.opt.ChunkSize) } // fs.Debugf(f, "Getting upload block %p", buf) return buf @@ -393,7 +429,7 @@ func (f *Fs) getUploadBlock() []byte { // putUploadBlock returns a block to the pool of size chunkSize func (f *Fs) putUploadBlock(buf []byte) { buf = buf[:cap(buf)] - if len(buf) != int(chunkSize) { + if len(buf) != int(f.opt.ChunkSize) { panic("bad blocksize returned to pool") } // fs.Debugf(f, "Returning upload block %p", buf) @@ -563,7 +599,7 @@ func (f *Fs) markBucketOK() { // listDir lists a single directory func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) { last := "" - err = f.list(dir, false, "", 0, *b2Versions, func(remote string, object *api.File, isDirectory bool) error { + err = f.list(dir, false, "", 0, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error { entry, err := f.itemToDirEntry(remote, object, isDirectory, &last) if err != nil { return err @@ -635,7 +671,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) { } list := walk.NewListRHelper(callback) last := "" - err = f.list(dir, true, "", 0, *b2Versions, func(remote string, object *api.File, isDirectory bool) error { + err = f.list(dir, true, "", 0, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error { entry, err := f.itemToDirEntry(remote, object, isDirectory, &last) if err != nil { return err @@ -1035,12 +1071,12 @@ func (o *Object) readMetaData() (err error) { maxSearched := 1 var timestamp api.Timestamp baseRemote := o.remote - if *b2Versions { + if o.fs.opt.Versions { timestamp, baseRemote = api.RemoveVersion(baseRemote) maxSearched = maxVersions } var info *api.File - err = o.fs.list("", true, baseRemote, maxSearched, *b2Versions, func(remote string, object *api.File, isDirectory bool) error { + err = o.fs.list("", true, baseRemote, maxSearched, o.fs.opt.Versions, func(remote string, object *api.File, isDirectory bool) error { if isDirectory { return nil } @@ -1254,7 +1290,7 @@ func urlEncode(in string) string { // // The new object may have been created if an error is returned func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { - if *b2Versions { + if o.fs.opt.Versions { return errNotWithVersions } err = o.fs.Mkdir("") @@ -1289,7 +1325,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio } else { return err } - } else if size > int64(uploadCutoff) { + } else if size > int64(o.fs.opt.UploadCutoff) { up, err := o.fs.newLargeUpload(o, in, src) if err != nil { return err @@ -1408,10 +1444,10 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio // Remove an object func (o *Object) Remove() error { - if *b2Versions { + if o.fs.opt.Versions { return errNotWithVersions } - if *b2HardDelete { + if o.fs.opt.HardDelete { return o.fs.deleteByID(o.id, o.fs.root+o.remote) } return o.fs.hide(o.fs.root + o.remote) diff --git a/backend/b2/upload.go b/backend/b2/upload.go index 44019fa92..76b2f32f5 100644 --- a/backend/b2/upload.go +++ b/backend/b2/upload.go @@ -86,10 +86,10 @@ func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *lar parts := int64(0) sha1SliceSize := int64(maxParts) if size == -1 { - fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", fs.SizeSuffix(chunkSize), 
fs.SizeSuffix(maxParts*chunkSize)) + fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize) } else { - parts = size / int64(chunkSize) - if size%int64(chunkSize) != 0 { + parts = size / int64(o.fs.opt.ChunkSize) + if size%int64(o.fs.opt.ChunkSize) != 0 { parts++ } if parts > maxParts { @@ -409,8 +409,8 @@ outer: } reqSize := remaining - if reqSize >= int64(chunkSize) { - reqSize = int64(chunkSize) + if reqSize >= int64(up.f.opt.ChunkSize) { + reqSize = int64(up.f.opt.ChunkSize) } // Get a block of memory diff --git a/backend/box/box.go b/backend/box/box.go index fde034ac7..c9cae7c90 100644 --- a/backend/box/box.go +++ b/backend/box/box.go @@ -23,7 +23,8 @@ import ( "github.com/ncw/rclone/backend/box/api" "github.com/ncw/rclone/fs" "github.com/ncw/rclone/fs/config" - "github.com/ncw/rclone/fs/config/flags" + "github.com/ncw/rclone/fs/config/configmap" + "github.com/ncw/rclone/fs/config/configstruct" "github.com/ncw/rclone/fs/config/obscure" "github.com/ncw/rclone/fs/fserrors" "github.com/ncw/rclone/fs/hash" @@ -46,6 +47,7 @@ const ( uploadURL = "https://upload.box.com/api/2.0" listChunks = 1000 // chunk size to read directory listings minUploadCutoff = 50000000 // upload cutoff can be no lower than this + defaultUploadCutoff = 50 * 1024 * 1024 ) // Globals @@ -61,7 +63,6 @@ var ( ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret), RedirectURL: oauthutil.RedirectURL, } - uploadCutoff = fs.SizeSuffix(50 * 1024 * 1024) ) // Register with Fs @@ -70,27 +71,37 @@ func init() { Name: "box", Description: "Box", NewFs: NewFs, - Config: func(name string) { - err := oauthutil.Config("box", name, oauthConfig) + Config: func(name string, m configmap.Mapper) { + err := oauthutil.Config("box", name, m, oauthConfig) if err != nil { log.Fatalf("Failed to configure token: %v", err) } }, Options: []fs.Option{{ Name: config.ConfigClientID, - Help: "Box App Client Id - leave blank normally.", + Help: "Box App Client Id.\nLeave blank normally.", }, { Name: config.ConfigClientSecret, - Help: "Box App Client Secret - leave blank normally.", + Help: "Box App Client Secret\nLeave blank normally.", + }, { + Name: "upload_cutoff", + Help: "Cutoff for switching to multipart upload.", + Default: fs.SizeSuffix(defaultUploadCutoff), + Advanced: true, }}, }) - flags.VarP(&uploadCutoff, "box-upload-cutoff", "", "Cutoff for switching to multipart upload") +} + +// Options defines the configuration for this backend +type Options struct { + UploadCutoff fs.SizeSuffix `config:"upload_cutoff"` } // Fs represents a remote box type Fs struct { name string // name of this remote root string // the path we are working on + opt Options // parsed options features *fs.Features // optional features srv *rest.Client // the connection to the one drive server dirCache *dircache.DirCache // Map of directory path to directory id @@ -219,13 +230,20 @@ func errorHandler(resp *http.Response) error { } // NewFs constructs an Fs from the path, container:path -func NewFs(name, root string) (fs.Fs, error) { - if uploadCutoff < minUploadCutoff { - return nil, errors.Errorf("box: upload cutoff (%v) must be greater than equal to %v", uploadCutoff, fs.SizeSuffix(minUploadCutoff)) +func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { + // Parse config into Options struct + opt := new(Options) + err := configstruct.Set(m, opt) + if err != nil { + return nil, err + } + + if opt.UploadCutoff < minUploadCutoff { + 
return nil, errors.Errorf("box: upload cutoff (%v) must be greater than equal to %v", opt.UploadCutoff, fs.SizeSuffix(minUploadCutoff)) } root = parsePath(root) - oAuthClient, ts, err := oauthutil.NewClient(name, oauthConfig) + oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig) if err != nil { log.Fatalf("Failed to configure Box: %v", err) } @@ -233,6 +251,7 @@ func NewFs(name, root string) (fs.Fs, error) { f := &Fs{ name: name, root: root, + opt: *opt, srv: rest.NewClient(oAuthClient).SetRoot(rootURL), pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant), uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers), @@ -1035,7 +1054,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio } // Upload with simple or multipart - if size <= int64(uploadCutoff) { + if size <= int64(o.fs.opt.UploadCutoff) { err = o.upload(in, leaf, directoryID, modTime) } else { err = o.uploadMultipart(in, leaf, directoryID, size, modTime) diff --git a/backend/cache/cache.go b/backend/cache/cache.go index f1ae38f95..d7541c080 100644 --- a/backend/cache/cache.go +++ b/backend/cache/cache.go @@ -18,7 +18,8 @@ import ( "github.com/ncw/rclone/backend/crypt" "github.com/ncw/rclone/fs" "github.com/ncw/rclone/fs/config" - "github.com/ncw/rclone/fs/config/flags" + "github.com/ncw/rclone/fs/config/configmap" + "github.com/ncw/rclone/fs/config/configstruct" "github.com/ncw/rclone/fs/config/obscure" "github.com/ncw/rclone/fs/hash" "github.com/ncw/rclone/fs/rc" @@ -30,13 +31,13 @@ import ( const ( // DefCacheChunkSize is the default value for chunk size - DefCacheChunkSize = "5M" + DefCacheChunkSize = fs.SizeSuffix(5 * 1024 * 1024) // DefCacheTotalChunkSize is the default value for the maximum size of stored chunks - DefCacheTotalChunkSize = "10G" + DefCacheTotalChunkSize = fs.SizeSuffix(10 * 1024 * 1024 * 1024) // DefCacheChunkCleanInterval is the interval at which chunks are cleaned - DefCacheChunkCleanInterval = "1m" + DefCacheChunkCleanInterval = fs.Duration(time.Minute) // DefCacheInfoAge is the default value for object info age - DefCacheInfoAge = "6h" + DefCacheInfoAge = fs.Duration(6 * time.Hour) // DefCacheReadRetries is the default value for read retries DefCacheReadRetries = 10 // DefCacheTotalWorkers is how many workers run in parallel to download chunks @@ -48,29 +49,9 @@ const ( // DefCacheWrites will cache file data on writes through the cache DefCacheWrites = false // DefCacheTmpWaitTime says how long should files be stored in local cache before being uploaded - DefCacheTmpWaitTime = "15m" + DefCacheTmpWaitTime = fs.Duration(15 * time.Second) // DefCacheDbWaitTime defines how long the cache backend should wait for the DB to be available - DefCacheDbWaitTime = 1 * time.Second -) - -// Globals -var ( - // Flags - cacheDbPath = flags.StringP("cache-db-path", "", filepath.Join(config.CacheDir, "cache-backend"), "Directory to cache DB") - cacheChunkPath = flags.StringP("cache-chunk-path", "", filepath.Join(config.CacheDir, "cache-backend"), "Directory to cached chunk files") - cacheDbPurge = flags.BoolP("cache-db-purge", "", false, "Purge the cache DB before") - cacheChunkSize = flags.StringP("cache-chunk-size", "", DefCacheChunkSize, "The size of a chunk") - cacheTotalChunkSize = flags.StringP("cache-total-chunk-size", "", DefCacheTotalChunkSize, "The total size which the chunks can take up from the disk") - cacheChunkCleanInterval = flags.StringP("cache-chunk-clean-interval", "", DefCacheChunkCleanInterval, "Interval at which 
chunk cleanup runs") - cacheInfoAge = flags.StringP("cache-info-age", "", DefCacheInfoAge, "How much time should object info be stored in cache") - cacheReadRetries = flags.IntP("cache-read-retries", "", DefCacheReadRetries, "How many times to retry a read from a cache storage") - cacheTotalWorkers = flags.IntP("cache-workers", "", DefCacheTotalWorkers, "How many workers should run in parallel to download chunks") - cacheChunkNoMemory = flags.BoolP("cache-chunk-no-memory", "", DefCacheChunkNoMemory, "Disable the in-memory cache for storing chunks during streaming") - cacheRps = flags.IntP("cache-rps", "", int(DefCacheRps), "Limits the number of requests per second to the source FS. -1 disables the rate limiter") - cacheStoreWrites = flags.BoolP("cache-writes", "", DefCacheWrites, "Will cache file data on writes through the FS") - cacheTempWritePath = flags.StringP("cache-tmp-upload-path", "", "", "Directory to keep temporary files until they are uploaded to the cloud storage") - cacheTempWaitTime = flags.StringP("cache-tmp-wait-time", "", DefCacheTmpWaitTime, "How long should files be stored in local cache before being uploaded") - cacheDbWaitTime = flags.DurationP("cache-db-wait-time", "", DefCacheDbWaitTime, "How long to wait for the DB to be available - 0 is unlimited") + DefCacheDbWaitTime = fs.Duration(1 * time.Second) ) // Register with Fs @@ -80,73 +61,155 @@ func init() { Description: "Cache a remote", NewFs: NewFs, Options: []fs.Option{{ - Name: "remote", - Help: "Remote to cache.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).", + Name: "remote", + Help: "Remote to cache.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).", + Required: true, }, { - Name: "plex_url", - Help: "Optional: The URL of the Plex server", - Optional: true, + Name: "plex_url", + Help: "The URL of the Plex server", }, { - Name: "plex_username", - Help: "Optional: The username of the Plex user", - Optional: true, + Name: "plex_username", + Help: "The username of the Plex user", }, { Name: "plex_password", - Help: "Optional: The password of the Plex user", + Help: "The password of the Plex user", IsPassword: true, - Optional: true, }, { - Name: "chunk_size", - Help: "The size of a chunk. Lower value good for slow connections but can affect seamless reading. \nDefault: " + DefCacheChunkSize, - Examples: []fs.OptionExample{ - { - Value: "1m", - Help: "1MB", - }, { - Value: "5M", - Help: "5 MB", - }, { - Value: "10M", - Help: "10 MB", - }, - }, - Optional: true, + Name: "plex_token", + Help: "The plex token for authentication - auto set normally", + Hide: fs.OptionHideBoth, + Advanced: true, }, { - Name: "info_age", - Help: "How much time should object info (file size, file hashes etc) be stored in cache. Use a very high value if you don't plan on changing the source FS from outside the cache. \nAccepted units are: \"s\", \"m\", \"h\".\nDefault: " + DefCacheInfoAge, - Examples: []fs.OptionExample{ - { - Value: "1h", - Help: "1 hour", - }, { - Value: "24h", - Help: "24 hours", - }, { - Value: "48h", - Help: "48 hours", - }, - }, - Optional: true, + Name: "chunk_size", + Help: "The size of a chunk. 
Lower value good for slow connections but can affect seamless reading.", + Default: DefCacheChunkSize, + Examples: []fs.OptionExample{{ + Value: "1m", + Help: "1MB", + }, { + Value: "5M", + Help: "5 MB", + }, { + Value: "10M", + Help: "10 MB", + }}, }, { - Name: "chunk_total_size", - Help: "The maximum size of stored chunks. When the storage grows beyond this size, the oldest chunks will be deleted. \nDefault: " + DefCacheTotalChunkSize, - Examples: []fs.OptionExample{ - { - Value: "500M", - Help: "500 MB", - }, { - Value: "1G", - Help: "1 GB", - }, { - Value: "10G", - Help: "10 GB", - }, - }, - Optional: true, + Name: "info_age", + Help: "How much time should object info (file size, file hashes etc) be stored in cache.\nUse a very high value if you don't plan on changing the source FS from outside the cache.\nAccepted units are: \"s\", \"m\", \"h\".", + Default: DefCacheInfoAge, + Examples: []fs.OptionExample{{ + Value: "1h", + Help: "1 hour", + }, { + Value: "24h", + Help: "24 hours", + }, { + Value: "48h", + Help: "48 hours", + }}, + }, { + Name: "chunk_total_size", + Help: "The maximum size of stored chunks. When the storage grows beyond this size, the oldest chunks will be deleted.", + Default: DefCacheTotalChunkSize, + Examples: []fs.OptionExample{{ + Value: "500M", + Help: "500 MB", + }, { + Value: "1G", + Help: "1 GB", + }, { + Value: "10G", + Help: "10 GB", + }}, + }, { + Name: "db_path", + Default: filepath.Join(config.CacheDir, "cache-backend"), + Help: "Directory to cache DB", + Advanced: true, + }, { + Name: "chunk_path", + Default: filepath.Join(config.CacheDir, "cache-backend"), + Help: "Directory to cache chunk files", + Advanced: true, + }, { + Name: "db_purge", + Default: false, + Help: "Purge the cache DB before", + Hide: fs.OptionHideConfigurator, + Advanced: true, + }, { + Name: "chunk_clean_interval", + Default: DefCacheChunkCleanInterval, + Help: "Interval at which chunk cleanup runs", + Advanced: true, + }, { + Name: "read_retries", + Default: DefCacheReadRetries, + Help: "How many times to retry a read from a cache storage", + Advanced: true, + }, { + Name: "workers", + Default: DefCacheTotalWorkers, + Help: "How many workers should run in parallel to download chunks", + Advanced: true, + }, { + Name: "chunk_no_memory", + Default: DefCacheChunkNoMemory, + Help: "Disable the in-memory cache for storing chunks during streaming", + Advanced: true, + }, { + Name: "rps", + Default: int(DefCacheRps), + Help: "Limits the number of requests per second to the source FS. 
-1 disables the rate limiter", + Advanced: true, + }, { + Name: "writes", + Default: DefCacheWrites, + Help: "Will cache file data on writes through the FS", + Advanced: true, + }, { + Name: "tmp_upload_path", + Default: "", + Help: "Directory to keep temporary files until they are uploaded to the cloud storage", + Advanced: true, + }, { + Name: "tmp_wait_time", + Default: DefCacheTmpWaitTime, + Help: "How long should files be stored in local cache before being uploaded", + Advanced: true, + }, { + Name: "db_wait_time", + Default: DefCacheDbWaitTime, + Help: "How long to wait for the DB to be available - 0 is unlimited", + Advanced: true, }}, }) } +// Options defines the configuration for this backend +type Options struct { + Remote string `config:"remote"` + PlexURL string `config:"plex_url"` + PlexUsername string `config:"plex_username"` + PlexPassword string `config:"plex_password"` + PlexToken string `config:"plex_token"` + ChunkSize fs.SizeSuffix `config:"chunk_size"` + InfoAge fs.Duration `config:"info_age"` + ChunkTotalSize fs.SizeSuffix `config:"chunk_total_size"` + DbPath string `config:"db_path"` + ChunkPath string `config:"chunk_path"` + DbPurge bool `config:"db_purge"` + ChunkCleanInterval fs.Duration `config:"chunk_clean_interval"` + ReadRetries int `config:"read_retries"` + TotalWorkers int `config:"workers"` + ChunkNoMemory bool `config:"chunk_no_memory"` + Rps int `config:"rps"` + StoreWrites bool `config:"writes"` + TempWritePath string `config:"tmp_upload_path"` + TempWaitTime fs.Duration `config:"tmp_wait_time"` + DbWaitTime fs.Duration `config:"db_wait_time"` +} + // Fs represents a wrapped fs.Fs type Fs struct { fs.Fs @@ -154,21 +217,10 @@ type Fs struct { name string root string + opt Options // parsed options features *fs.Features // optional features cache *Persistent - - fileAge time.Duration - chunkSize int64 - chunkTotalSize int64 - chunkCleanInterval time.Duration - readRetries int - totalWorkers int - totalMaxWorkers int - chunkMemory bool - cacheWrites bool - tempWritePath string - tempWriteWait time.Duration - tempFs fs.Fs + tempFs fs.Fs lastChunkCleanup time.Time cleanupMu sync.Mutex @@ -188,9 +240,19 @@ func parseRootPath(path string) (string, error) { } // NewFs constructs a Fs from the path, container:path -func NewFs(name, rootPath string) (fs.Fs, error) { - remote := config.FileGet(name, "remote") - if strings.HasPrefix(remote, name+":") { +func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) { + // Parse config into Options struct + opt := new(Options) + err := configstruct.Set(m, opt) + if err != nil { + return nil, err + } + if opt.ChunkTotalSize < opt.ChunkSize*fs.SizeSuffix(opt.TotalWorkers) { + return nil, errors.Errorf("don't set cache-total-chunk-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)", + opt.ChunkTotalSize, opt.ChunkSize, opt.TotalWorkers) + } + + if strings.HasPrefix(opt.Remote, name+":") { return nil, errors.New("can't point cache remote at itself - check the value of the remote setting") } @@ -199,7 +261,7 @@ func NewFs(name, rootPath string) (fs.Fs, error) { return nil, errors.Wrapf(err, "failed to clean root path %q", rootPath) } - remotePath := path.Join(remote, rpath) + remotePath := path.Join(opt.Remote, rpath) wrappedFs, wrapErr := fs.NewFs(remotePath) if wrapErr != nil && wrapErr != fs.ErrorIsFile { return nil, errors.Wrapf(wrapErr, "failed to make remote %q to wrap", remotePath) @@ -210,97 +272,46 @@ func NewFs(name, rootPath string) (fs.Fs, error) { fsErr = fs.ErrorIsFile rpath = 
cleanPath(path.Dir(rpath)) } - plexURL := config.FileGet(name, "plex_url") - plexToken := config.FileGet(name, "plex_token") - var chunkSize fs.SizeSuffix - chunkSizeString := config.FileGet(name, "chunk_size", DefCacheChunkSize) - if *cacheChunkSize != DefCacheChunkSize { - chunkSizeString = *cacheChunkSize - } - err = chunkSize.Set(chunkSizeString) - if err != nil { - return nil, errors.Wrapf(err, "failed to understand chunk size %v", chunkSizeString) - } - var chunkTotalSize fs.SizeSuffix - chunkTotalSizeString := config.FileGet(name, "chunk_total_size", DefCacheTotalChunkSize) - if *cacheTotalChunkSize != DefCacheTotalChunkSize { - chunkTotalSizeString = *cacheTotalChunkSize - } - err = chunkTotalSize.Set(chunkTotalSizeString) - if err != nil { - return nil, errors.Wrapf(err, "failed to understand chunk total size %v", chunkTotalSizeString) - } - chunkCleanIntervalStr := *cacheChunkCleanInterval - chunkCleanInterval, err := time.ParseDuration(chunkCleanIntervalStr) - if err != nil { - return nil, errors.Wrapf(err, "failed to understand duration %v", chunkCleanIntervalStr) - } - infoAge := config.FileGet(name, "info_age", DefCacheInfoAge) - if *cacheInfoAge != DefCacheInfoAge { - infoAge = *cacheInfoAge - } - infoDuration, err := time.ParseDuration(infoAge) - if err != nil { - return nil, errors.Wrapf(err, "failed to understand duration %v", infoAge) - } - waitTime, err := time.ParseDuration(*cacheTempWaitTime) - if err != nil { - return nil, errors.Wrapf(err, "failed to understand duration %v", *cacheTempWaitTime) - } // configure cache backend - if *cacheDbPurge { + if opt.DbPurge { fs.Debugf(name, "Purging the DB") } f := &Fs{ - Fs: wrappedFs, - name: name, - root: rpath, - fileAge: infoDuration, - chunkSize: int64(chunkSize), - chunkTotalSize: int64(chunkTotalSize), - chunkCleanInterval: chunkCleanInterval, - readRetries: *cacheReadRetries, - totalWorkers: *cacheTotalWorkers, - totalMaxWorkers: *cacheTotalWorkers, - chunkMemory: !*cacheChunkNoMemory, - cacheWrites: *cacheStoreWrites, - lastChunkCleanup: time.Now().Truncate(time.Hour * 24 * 30), - tempWritePath: *cacheTempWritePath, - tempWriteWait: waitTime, - cleanupChan: make(chan bool, 1), - notifiedRemotes: make(map[string]bool), + Fs: wrappedFs, + name: name, + root: rpath, + opt: *opt, + lastChunkCleanup: time.Now().Truncate(time.Hour * 24 * 30), + cleanupChan: make(chan bool, 1), + notifiedRemotes: make(map[string]bool), } - if f.chunkTotalSize < (f.chunkSize * int64(f.totalWorkers)) { - return nil, errors.Errorf("don't set cache-total-chunk-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)", - f.chunkTotalSize, f.chunkSize, f.totalWorkers) - } - f.rateLimiter = rate.NewLimiter(rate.Limit(float64(*cacheRps)), f.totalWorkers) + f.rateLimiter = rate.NewLimiter(rate.Limit(float64(opt.Rps)), opt.TotalWorkers) f.plexConnector = &plexConnector{} - if plexURL != "" { - if plexToken != "" { - f.plexConnector, err = newPlexConnectorWithToken(f, plexURL, plexToken) + if opt.PlexURL != "" { + if opt.PlexToken != "" { + f.plexConnector, err = newPlexConnectorWithToken(f, opt.PlexURL, opt.PlexToken) if err != nil { - return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", plexURL) + return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL) } } else { - plexUsername := config.FileGet(name, "plex_username") - plexPassword := config.FileGet(name, "plex_password") - if plexPassword != "" && plexUsername != "" { - decPass, err := obscure.Reveal(plexPassword) + if opt.PlexPassword != "" && 
opt.PlexUsername != "" { + decPass, err := obscure.Reveal(opt.PlexPassword) if err != nil { - decPass = plexPassword + decPass = opt.PlexPassword } - f.plexConnector, err = newPlexConnector(f, plexURL, plexUsername, decPass) + f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, func(token string) { + m.Set("plex_token", token) + }) if err != nil { - return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", plexURL) + return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL) } } } } - dbPath := *cacheDbPath - chunkPath := *cacheChunkPath + dbPath := f.opt.DbPath + chunkPath := f.opt.ChunkPath // if the dbPath is non default but the chunk path is default, we overwrite the last to follow the same one as dbPath if dbPath != filepath.Join(config.CacheDir, "cache-backend") && chunkPath == filepath.Join(config.CacheDir, "cache-backend") { @@ -326,7 +337,8 @@ func NewFs(name, rootPath string) (fs.Fs, error) { fs.Infof(name, "Cache DB path: %v", dbPath) fs.Infof(name, "Cache chunk path: %v", chunkPath) f.cache, err = GetPersistent(dbPath, chunkPath, &Features{ - PurgeDb: *cacheDbPurge, + PurgeDb: opt.DbPurge, + DbWaitTime: time.Duration(opt.DbWaitTime), }) if err != nil { return nil, errors.Wrapf(err, "failed to start cache db") @@ -335,7 +347,7 @@ func NewFs(name, rootPath string) (fs.Fs, error) { c := make(chan os.Signal, 1) signal.Notify(c, syscall.SIGHUP) atexit.Register(func() { - if plexURL != "" { + if opt.PlexURL != "" { f.plexConnector.closeWebsocket() } f.StopBackgroundRunners() @@ -350,35 +362,35 @@ func NewFs(name, rootPath string) (fs.Fs, error) { } }() - fs.Infof(name, "Chunk Memory: %v", f.chunkMemory) - fs.Infof(name, "Chunk Size: %v", fs.SizeSuffix(f.chunkSize)) - fs.Infof(name, "Chunk Total Size: %v", fs.SizeSuffix(f.chunkTotalSize)) - fs.Infof(name, "Chunk Clean Interval: %v", f.chunkCleanInterval.String()) - fs.Infof(name, "Workers: %v", f.totalWorkers) - fs.Infof(name, "File Age: %v", f.fileAge.String()) - if f.cacheWrites { + fs.Infof(name, "Chunk Memory: %v", !f.opt.ChunkNoMemory) + fs.Infof(name, "Chunk Size: %v", f.opt.ChunkSize) + fs.Infof(name, "Chunk Total Size: %v", f.opt.ChunkTotalSize) + fs.Infof(name, "Chunk Clean Interval: %v", f.opt.ChunkCleanInterval) + fs.Infof(name, "Workers: %v", f.opt.TotalWorkers) + fs.Infof(name, "File Age: %v", f.opt.InfoAge) + if !f.opt.StoreWrites { fs.Infof(name, "Cache Writes: enabled") } - if f.tempWritePath != "" { - err = os.MkdirAll(f.tempWritePath, os.ModePerm) + if f.opt.TempWritePath != "" { + err = os.MkdirAll(f.opt.TempWritePath, os.ModePerm) if err != nil { - return nil, errors.Wrapf(err, "failed to create cache directory %v", f.tempWritePath) + return nil, errors.Wrapf(err, "failed to create cache directory %v", f.opt.TempWritePath) } - f.tempWritePath = filepath.ToSlash(f.tempWritePath) - f.tempFs, err = fs.NewFs(f.tempWritePath) + f.opt.TempWritePath = filepath.ToSlash(f.opt.TempWritePath) + f.tempFs, err = fs.NewFs(f.opt.TempWritePath) if err != nil { return nil, errors.Wrapf(err, "failed to create temp fs: %v", err) } - fs.Infof(name, "Upload Temp Rest Time: %v", f.tempWriteWait.String()) - fs.Infof(name, "Upload Temp FS: %v", f.tempWritePath) + fs.Infof(name, "Upload Temp Rest Time: %v", f.opt.TempWaitTime) + fs.Infof(name, "Upload Temp FS: %v", f.opt.TempWritePath) f.backgroundRunner, _ = initBackgroundUploader(f) go f.backgroundRunner.run() } go func() { for { - time.Sleep(f.chunkCleanInterval) + time.Sleep(time.Duration(f.opt.ChunkCleanInterval)) 
select { case <-f.cleanupChan: fs.Infof(f, "stopping cleanup") @@ -391,7 +403,7 @@ func NewFs(name, rootPath string) (fs.Fs, error) { }() if doChangeNotify := wrappedFs.Features().ChangeNotify; doChangeNotify != nil { - doChangeNotify(f.receiveChangeNotify, f.chunkCleanInterval) + doChangeNotify(f.receiveChangeNotify, time.Duration(f.opt.ChunkCleanInterval)) } f.features = (&fs.Features{ @@ -400,7 +412,7 @@ func NewFs(name, rootPath string) (fs.Fs, error) { }).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs) // override only those features that use a temp fs and it doesn't support them //f.features.ChangeNotify = f.ChangeNotify - if f.tempWritePath != "" { + if f.opt.TempWritePath != "" { if f.tempFs.Features().Copy == nil { f.features.Copy = nil } @@ -563,7 +575,7 @@ func (f *Fs) receiveChangeNotify(forgetPath string, entryType fs.EntryType) { // notifyChangeUpstreamIfNeeded will check if the wrapped remote doesn't notify on changes // or if we use a temp fs func (f *Fs) notifyChangeUpstreamIfNeeded(remote string, entryType fs.EntryType) { - if f.Fs.Features().ChangeNotify == nil || f.tempWritePath != "" { + if f.Fs.Features().ChangeNotify == nil || f.opt.TempWritePath != "" { f.notifyChangeUpstream(remote, entryType) } } @@ -613,17 +625,17 @@ func (f *Fs) String() string { // ChunkSize returns the configured chunk size func (f *Fs) ChunkSize() int64 { - return f.chunkSize + return int64(f.opt.ChunkSize) } // InfoAge returns the configured file age func (f *Fs) InfoAge() time.Duration { - return f.fileAge + return time.Duration(f.opt.InfoAge) } // TempUploadWaitTime returns the configured temp file upload wait time func (f *Fs) TempUploadWaitTime() time.Duration { - return f.tempWriteWait + return time.Duration(f.opt.TempWaitTime) } // NewObject finds the Object at remote. @@ -636,16 +648,16 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) { err = f.cache.GetObject(co) if err != nil { fs.Debugf(remote, "find: error: %v", err) - } else if time.Now().After(co.CacheTs.Add(f.fileAge)) { + } else if time.Now().After(co.CacheTs.Add(time.Duration(f.opt.InfoAge))) { fs.Debugf(co, "find: cold object: %+v", co) } else { - fs.Debugf(co, "find: warm object: %v, expiring on: %v", co, co.CacheTs.Add(f.fileAge)) + fs.Debugf(co, "find: warm object: %v, expiring on: %v", co, co.CacheTs.Add(time.Duration(f.opt.InfoAge))) return co, nil } // search for entry in source or temp fs var obj fs.Object - if f.tempWritePath != "" { + if f.opt.TempWritePath != "" { obj, err = f.tempFs.NewObject(remote) // not found in temp fs if err != nil { @@ -679,13 +691,13 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { entries, err = f.cache.GetDirEntries(cd) if err != nil { fs.Debugf(dir, "list: error: %v", err) - } else if time.Now().After(cd.CacheTs.Add(f.fileAge)) { + } else if time.Now().After(cd.CacheTs.Add(time.Duration(f.opt.InfoAge))) { fs.Debugf(dir, "list: cold listing: %v", cd.CacheTs) } else if len(entries) == 0 { // TODO: read empty dirs from source? 
fs.Debugf(dir, "list: empty listing") } else { - fs.Debugf(dir, "list: warm %v from cache for: %v, expiring on: %v", len(entries), cd.abs(), cd.CacheTs.Add(f.fileAge)) + fs.Debugf(dir, "list: warm %v from cache for: %v, expiring on: %v", len(entries), cd.abs(), cd.CacheTs.Add(time.Duration(f.opt.InfoAge))) fs.Debugf(dir, "list: cached entries: %v", entries) return entries, nil } @@ -693,7 +705,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { // we first search any temporary files stored locally var cachedEntries fs.DirEntries - if f.tempWritePath != "" { + if f.opt.TempWritePath != "" { queuedEntries, err := f.cache.searchPendingUploadFromDir(cd.abs()) if err != nil { fs.Errorf(dir, "list: error getting pending uploads: %v", err) @@ -744,7 +756,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { case fs.Directory: cdd := DirectoryFromOriginal(f, o) // check if the dir isn't expired and add it in cache if it isn't - if cdd2, err := f.cache.GetDir(cdd.abs()); err != nil || time.Now().Before(cdd2.CacheTs.Add(f.fileAge)) { + if cdd2, err := f.cache.GetDir(cdd.abs()); err != nil || time.Now().Before(cdd2.CacheTs.Add(time.Duration(f.opt.InfoAge))) { batchDirectories = append(batchDirectories, cdd) } cachedEntries = append(cachedEntries, cdd) @@ -867,7 +879,7 @@ func (f *Fs) Mkdir(dir string) error { func (f *Fs) Rmdir(dir string) error { fs.Debugf(f, "rmdir '%s'", dir) - if f.tempWritePath != "" { + if f.opt.TempWritePath != "" { // pause background uploads f.backgroundRunner.pause() defer f.backgroundRunner.play() @@ -952,7 +964,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { return fs.ErrorCantDirMove } - if f.tempWritePath != "" { + if f.opt.TempWritePath != "" { // pause background uploads f.backgroundRunner.pause() defer f.backgroundRunner.play() @@ -1079,7 +1091,7 @@ func (f *Fs) cacheReader(u io.Reader, src fs.ObjectInfo, originalRead func(inn i go func() { var offset int64 for { - chunk := make([]byte, f.chunkSize) + chunk := make([]byte, f.opt.ChunkSize) readSize, err := io.ReadFull(pr, chunk) // we ignore 3 failures which are ok: // 1. EOF - original reading finished and we got a full buffer too @@ -1127,7 +1139,7 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p var obj fs.Object // queue for upload and store in temp fs if configured - if f.tempWritePath != "" { + if f.opt.TempWritePath != "" { // we need to clear the caches before a put through temp fs parentCd := NewDirectory(f, cleanPath(path.Dir(src.Remote()))) _ = f.cache.ExpireDir(parentCd) @@ -1146,7 +1158,7 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p } fs.Infof(obj, "put: queued for upload") // if cache writes is enabled write it first through cache - } else if f.cacheWrites { + } else if f.opt.StoreWrites { f.cacheReader(in, src, func(inn io.Reader) { obj, err = put(inn, src, options...) 
}) @@ -1243,7 +1255,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { if srcObj.isTempFile() { // we check if the feature is stil active - if f.tempWritePath == "" { + if f.opt.TempWritePath == "" { fs.Errorf(srcObj, "can't copy - this is a local cached file but this feature is turned off this run") return nil, fs.ErrorCantCopy } @@ -1319,7 +1331,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { // if this is a temp object then we perform the changes locally if srcObj.isTempFile() { // we check if the feature is stil active - if f.tempWritePath == "" { + if f.opt.TempWritePath == "" { fs.Errorf(srcObj, "can't move - this is a local cached file but this feature is turned off this run") return nil, fs.ErrorCantMove } @@ -1460,8 +1472,8 @@ func (f *Fs) CleanUpCache(ignoreLastTs bool) { f.cleanupMu.Lock() defer f.cleanupMu.Unlock() - if ignoreLastTs || time.Now().After(f.lastChunkCleanup.Add(f.chunkCleanInterval)) { - f.cache.CleanChunksBySize(f.chunkTotalSize) + if ignoreLastTs || time.Now().After(f.lastChunkCleanup.Add(time.Duration(f.opt.ChunkCleanInterval))) { + f.cache.CleanChunksBySize(int64(f.opt.ChunkTotalSize)) f.lastChunkCleanup = time.Now() } } @@ -1470,7 +1482,7 @@ func (f *Fs) CleanUpCache(ignoreLastTs bool) { // can be triggered from a terminate signal or from testing between runs func (f *Fs) StopBackgroundRunners() { f.cleanupChan <- false - if f.tempWritePath != "" && f.backgroundRunner != nil && f.backgroundRunner.isRunning() { + if f.opt.TempWritePath != "" && f.backgroundRunner != nil && f.backgroundRunner.isRunning() { f.backgroundRunner.close() } f.cache.Close() @@ -1528,7 +1540,7 @@ func (f *Fs) DirCacheFlush() { // GetBackgroundUploadChannel returns a channel that can be listened to for remote activities that happen // in the background func (f *Fs) GetBackgroundUploadChannel() chan BackgroundUploadState { - if f.tempWritePath != "" { + if f.opt.TempWritePath != "" { return f.backgroundRunner.notifyCh } return nil diff --git a/backend/cache/cache_internal_test.go b/backend/cache/cache_internal_test.go index 814785259..12461df82 100644 --- a/backend/cache/cache_internal_test.go +++ b/backend/cache/cache_internal_test.go @@ -33,13 +33,13 @@ import ( "github.com/ncw/rclone/backend/local" "github.com/ncw/rclone/fs" "github.com/ncw/rclone/fs/config" + "github.com/ncw/rclone/fs/config/configmap" "github.com/ncw/rclone/fs/object" "github.com/ncw/rclone/fs/rc" "github.com/ncw/rclone/fs/rc/rcflags" "github.com/ncw/rclone/fstest" "github.com/ncw/rclone/vfs" "github.com/ncw/rclone/vfs/vfsflags" - flag "github.com/spf13/pflag" "github.com/stretchr/testify/require" ) @@ -140,7 +140,7 @@ func TestInternalVfsCache(t *testing.T) { vfsflags.Opt.CacheMode = vfs.CacheModeWrites id := "tiuufo" - rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"cache-writes": "true", "cache-info-age": "1h"}) + rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"writes": "true", "info_age": "1h"}) defer runInstance.cleanupFs(t, rootFs, boltDb) err := rootFs.Mkdir("test") @@ -699,7 +699,7 @@ func TestInternalChangeSeenAfterRc(t *testing.T) { rc.Start(&rcflags.Opt) id := fmt.Sprintf("ticsarc%v", time.Now().Unix()) - rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"rc": "true"}) + rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil) defer runInstance.cleanupFs(t, rootFs, boltDb) if 
!runInstance.useMount { @@ -774,7 +774,7 @@ func TestInternalChangeSeenAfterRc(t *testing.T) { func TestInternalCacheWrites(t *testing.T) { id := "ticw" - rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"cache-writes": "true"}) + rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"writes": "true"}) defer runInstance.cleanupFs(t, rootFs, boltDb) cfs, err := runInstance.getCacheFs(rootFs) @@ -793,7 +793,7 @@ func TestInternalCacheWrites(t *testing.T) { func TestInternalMaxChunkSizeRespected(t *testing.T) { id := fmt.Sprintf("timcsr%v", time.Now().Unix()) - rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"cache-workers": "1"}) + rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"workers": "1"}) defer runInstance.cleanupFs(t, rootFs, boltDb) cfs, err := runInstance.getCacheFs(rootFs) @@ -868,7 +868,7 @@ func TestInternalBug2117(t *testing.T) { id := fmt.Sprintf("tib2117%v", time.Now().Unix()) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, - map[string]string{"cache-info-age": "72h", "cache-chunk-clean-interval": "15m"}) + map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"}) defer runInstance.cleanupFs(t, rootFs, boltDb) if runInstance.rootIsCrypt { @@ -918,10 +918,7 @@ func TestInternalBug2117(t *testing.T) { // run holds the remotes for a test run type run struct { okDiff time.Duration - allCfgMap map[string]string - allFlagMap map[string]string - runDefaultCfgMap map[string]string - runDefaultFlagMap map[string]string + runDefaultCfgMap configmap.Simple mntDir string tmpUploadDir string useMount bool @@ -945,38 +942,16 @@ func newRun() *run { isMounted: false, } - r.allCfgMap = map[string]string{ - "plex_url": "", - "plex_username": "", - "plex_password": "", - "chunk_size": cache.DefCacheChunkSize, - "info_age": cache.DefCacheInfoAge, - "chunk_total_size": cache.DefCacheTotalChunkSize, + // Read in all the defaults for all the options + fsInfo, err := fs.Find("cache") + if err != nil { + panic(fmt.Sprintf("Couldn't find cache remote: %v", err)) } - r.allFlagMap = map[string]string{ - "cache-db-path": filepath.Join(config.CacheDir, "cache-backend"), - "cache-chunk-path": filepath.Join(config.CacheDir, "cache-backend"), - "cache-db-purge": "true", - "cache-chunk-size": cache.DefCacheChunkSize, - "cache-total-chunk-size": cache.DefCacheTotalChunkSize, - "cache-chunk-clean-interval": cache.DefCacheChunkCleanInterval, - "cache-info-age": cache.DefCacheInfoAge, - "cache-read-retries": strconv.Itoa(cache.DefCacheReadRetries), - "cache-workers": strconv.Itoa(cache.DefCacheTotalWorkers), - "cache-chunk-no-memory": "false", - "cache-rps": strconv.Itoa(cache.DefCacheRps), - "cache-writes": "false", - "cache-tmp-upload-path": "", - "cache-tmp-wait-time": cache.DefCacheTmpWaitTime, - } - r.runDefaultCfgMap = make(map[string]string) - for key, value := range r.allCfgMap { - r.runDefaultCfgMap[key] = value - } - r.runDefaultFlagMap = make(map[string]string) - for key, value := range r.allFlagMap { - r.runDefaultFlagMap[key] = value + r.runDefaultCfgMap = configmap.Simple{} + for _, option := range fsInfo.Options { + r.runDefaultCfgMap.Set(option.Name, fmt.Sprint(option.Default)) } + if mountDir == "" { if runtime.GOOS != "windows" { r.mntDir, err = ioutil.TempDir("", "rclonecache-mount") @@ -1086,28 +1061,22 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, 
needRemote, purge bool boltDb, err := cache.GetPersistent(runInstance.dbPath, runInstance.chunkPath, &cache.Features{PurgeDb: true}) require.NoError(t, err) - for k, v := range r.runDefaultCfgMap { - if c, ok := cfg[k]; ok { - config.FileSet(cacheRemote, k, c) - } else { - config.FileSet(cacheRemote, k, v) - } - } - for k, v := range r.runDefaultFlagMap { - if c, ok := flags[k]; ok { - _ = flag.Set(k, c) - } else { - _ = flag.Set(k, v) - } - } fs.Config.LowLevelRetries = 1 + m := configmap.Simple{} + for k, v := range r.runDefaultCfgMap { + m.Set(k, v) + } + for k, v := range flags { + m.Set(k, v) + } + // Instantiate root if purge { boltDb.PurgeTempUploads() _ = os.RemoveAll(path.Join(runInstance.tmpUploadDir, id)) } - f, err := fs.NewFs(remote + ":" + id) + f, err := cache.NewFs(remote, id, m) require.NoError(t, err) cfs, err := r.getCacheFs(f) require.NoError(t, err) @@ -1157,9 +1126,6 @@ func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) { } r.tempFiles = nil debug.FreeOSMemory() - for k, v := range r.runDefaultFlagMap { - _ = flag.Set(k, v) - } } func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser { diff --git a/backend/cache/cache_upload_test.go b/backend/cache/cache_upload_test.go index e6072f392..c81a37044 100644 --- a/backend/cache/cache_upload_test.go +++ b/backend/cache/cache_upload_test.go @@ -22,7 +22,7 @@ func TestInternalUploadTempDirCreated(t *testing.T) { id := fmt.Sprintf("tiutdc%v", time.Now().Unix()) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, - map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id)}) + map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id)}) defer runInstance.cleanupFs(t, rootFs, boltDb) _, err := os.Stat(path.Join(runInstance.tmpUploadDir, id)) @@ -63,7 +63,7 @@ func TestInternalUploadQueueOneFileNoRest(t *testing.T) { id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix()) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, - map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "0s"}) + map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "0s"}) defer runInstance.cleanupFs(t, rootFs, boltDb) testInternalUploadQueueOneFile(t, id, rootFs, boltDb) @@ -73,7 +73,7 @@ func TestInternalUploadQueueOneFileWithRest(t *testing.T) { id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix()) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, - map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1m"}) + map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1m"}) defer runInstance.cleanupFs(t, rootFs, boltDb) testInternalUploadQueueOneFile(t, id, rootFs, boltDb) @@ -83,7 +83,7 @@ func TestInternalUploadMoveExistingFile(t *testing.T) { id := fmt.Sprintf("tiumef%v", time.Now().Unix()) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, - map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "3s"}) + map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"}) defer runInstance.cleanupFs(t, rootFs, boltDb) err := rootFs.Mkdir("one") @@ -163,7 +163,7 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) { id := fmt.Sprintf("tiuqmf%v", time.Now().Unix()) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, 
id, true, true, nil, - map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1s"}) + map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"}) defer runInstance.cleanupFs(t, rootFs, boltDb) err := rootFs.Mkdir("test") @@ -213,7 +213,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) { id := "tiutfo" rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, - map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"}) + map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"}) defer runInstance.cleanupFs(t, rootFs, boltDb) boltDb.PurgeTempUploads() @@ -343,7 +343,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) { id := "tiuufo" rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, - map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"}) + map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"}) defer runInstance.cleanupFs(t, rootFs, boltDb) boltDb.PurgeTempUploads() diff --git a/backend/cache/handle.go b/backend/cache/handle.go index d78bbf737..f60829928 100644 --- a/backend/cache/handle.go +++ b/backend/cache/handle.go @@ -65,14 +65,14 @@ func NewObjectHandle(o *Object, cfs *Fs) *Handle { offset: 0, preloadOffset: -1, // -1 to trigger the first preload - UseMemory: cfs.chunkMemory, + UseMemory: !cfs.opt.ChunkNoMemory, reading: false, } r.seenOffsets = make(map[int64]bool) r.memory = NewMemory(-1) // create a larger buffer to queue up requests - r.preloadQueue = make(chan int64, r.cfs.totalWorkers*10) + r.preloadQueue = make(chan int64, r.cfs.opt.TotalWorkers*10) r.confirmReading = make(chan bool) r.startReadWorkers() return r @@ -98,7 +98,7 @@ func (r *Handle) startReadWorkers() { if r.hasAtLeastOneWorker() { return } - totalWorkers := r.cacheFs().totalWorkers + totalWorkers := r.cacheFs().opt.TotalWorkers if r.cacheFs().plexConnector.isConfigured() { if !r.cacheFs().plexConnector.isConnected() { @@ -156,7 +156,7 @@ func (r *Handle) confirmExternalReading() { return } fs.Infof(r, "confirmed reading by external reader") - r.scaleWorkers(r.cacheFs().totalMaxWorkers) + 
r.scaleWorkers(r.cacheFs().opt.TotalWorkers) } // queueOffset will send an offset to the workers if it's different from the last one @@ -179,7 +179,7 @@ func (r *Handle) queueOffset(offset int64) { } for i := 0; i < len(r.workers); i++ { - o := r.preloadOffset + r.cacheFs().chunkSize*int64(i) + o := r.preloadOffset + int64(r.cacheFs().opt.ChunkSize)*int64(i) if o < 0 || o >= r.cachedObject.Size() { continue } @@ -211,7 +211,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) { var err error // we calculate the modulus of the requested offset with the size of a chunk - offset := chunkStart % r.cacheFs().chunkSize + offset := chunkStart % int64(r.cacheFs().opt.ChunkSize) // we align the start offset of the first chunk to a likely chunk in the storage chunkStart = chunkStart - offset @@ -228,7 +228,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) { if !found { // we're gonna give the workers a chance to pickup the chunk // and retry a couple of times - for i := 0; i < r.cacheFs().readRetries*8; i++ { + for i := 0; i < r.cacheFs().opt.ReadRetries*8; i++ { data, err = r.storage().GetChunk(r.cachedObject, chunkStart) if err == nil { found = true @@ -255,7 +255,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) { if offset > 0 { if offset > int64(len(data)) { fs.Errorf(r, "unexpected conditions during reading. current position: %v, current chunk position: %v, current chunk size: %v, offset: %v, chunk size: %v, file size: %v", - r.offset, chunkStart, len(data), offset, r.cacheFs().chunkSize, r.cachedObject.Size()) + r.offset, chunkStart, len(data), offset, r.cacheFs().opt.ChunkSize, r.cachedObject.Size()) return nil, io.ErrUnexpectedEOF } data = data[int(offset):] @@ -338,9 +338,9 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) { err = errors.Errorf("cache: unimplemented seek whence %v", whence) } - chunkStart := r.offset - (r.offset % r.cacheFs().chunkSize) - if chunkStart >= r.cacheFs().chunkSize { - chunkStart = chunkStart - r.cacheFs().chunkSize + chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize)) + if chunkStart >= int64(r.cacheFs().opt.ChunkSize) { + chunkStart = chunkStart - int64(r.cacheFs().opt.ChunkSize) } r.queueOffset(chunkStart) @@ -451,7 +451,7 @@ func (w *worker) run() { } } - chunkEnd := chunkStart + w.r.cacheFs().chunkSize + chunkEnd := chunkStart + int64(w.r.cacheFs().opt.ChunkSize) // TODO: Remove this comment if it proves to be reliable for #1896 //if chunkEnd > w.r.cachedObject.Size() { // chunkEnd = w.r.cachedObject.Size() @@ -466,7 +466,7 @@ func (w *worker) download(chunkStart, chunkEnd int64, retry int) { var data []byte // stop retries - if retry >= w.r.cacheFs().readRetries { + if retry >= w.r.cacheFs().opt.ReadRetries { return } // back-off between retries @@ -612,7 +612,7 @@ func (b *backgroundWriter) run() { return } - absPath, err := b.fs.cache.getPendingUpload(b.fs.Root(), b.fs.tempWriteWait) + absPath, err := b.fs.cache.getPendingUpload(b.fs.Root(), time.Duration(b.fs.opt.TempWaitTime)) if err != nil || absPath == "" || !b.fs.isRootInPath(absPath) { time.Sleep(time.Second) continue diff --git a/backend/cache/object.go b/backend/cache/object.go index 10d13aa0a..b04a97df2 100644 --- a/backend/cache/object.go +++ b/backend/cache/object.go @@ -44,7 +44,7 @@ func NewObject(f *Fs, remote string) *Object { cacheType := objectInCache parentFs := f.UnWrap() - if f.tempWritePath != "" { + if f.opt.TempWritePath != "" { _, err := f.cache.SearchPendingUpload(fullRemote) if err == nil { // queued 
for upload cacheType = objectPendingUpload @@ -75,7 +75,7 @@ func ObjectFromOriginal(f *Fs, o fs.Object) *Object { cacheType := objectInCache parentFs := f.UnWrap() - if f.tempWritePath != "" { + if f.opt.TempWritePath != "" { _, err := f.cache.SearchPendingUpload(fullRemote) if err == nil { // queued for upload cacheType = objectPendingUpload @@ -153,7 +153,7 @@ func (o *Object) Storable() bool { // 2. is not pending a notification from the wrapped fs func (o *Object) refresh() error { isNotified := o.CacheFs.isNotifiedRemote(o.Remote()) - isExpired := time.Now().After(o.CacheTs.Add(o.CacheFs.fileAge)) + isExpired := time.Now().After(o.CacheTs.Add(time.Duration(o.CacheFs.opt.InfoAge))) if !isExpired && !isNotified { return nil } @@ -237,7 +237,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio return err } // pause background uploads if active - if o.CacheFs.tempWritePath != "" { + if o.CacheFs.opt.TempWritePath != "" { o.CacheFs.backgroundRunner.pause() defer o.CacheFs.backgroundRunner.play() // don't allow started uploads @@ -274,7 +274,7 @@ func (o *Object) Remove() error { return err } // pause background uploads if active - if o.CacheFs.tempWritePath != "" { + if o.CacheFs.opt.TempWritePath != "" { o.CacheFs.backgroundRunner.pause() defer o.CacheFs.backgroundRunner.play() // don't allow started uploads diff --git a/backend/cache/plex.go b/backend/cache/plex.go index 765b9fb5d..424455fb1 100644 --- a/backend/cache/plex.go +++ b/backend/cache/plex.go @@ -16,7 +16,6 @@ import ( "io/ioutil" "github.com/ncw/rclone/fs" - "github.com/ncw/rclone/fs/config" "github.com/patrickmn/go-cache" "golang.org/x/net/websocket" ) @@ -60,10 +59,11 @@ type plexConnector struct { running bool runningMu sync.Mutex stateCache *cache.Cache + saveToken func(string) } // newPlexConnector connects to a Plex server and generates a token -func newPlexConnector(f *Fs, plexURL, username, password string) (*plexConnector, error) { +func newPlexConnector(f *Fs, plexURL, username, password string, saveToken func(string)) (*plexConnector, error) { u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/")) if err != nil { return nil, err @@ -76,6 +76,7 @@ func newPlexConnector(f *Fs, plexURL, username, password string) (*plexConnector password: password, token: "", stateCache: cache.New(time.Hour, time.Minute), + saveToken: saveToken, } return pc, nil @@ -209,8 +210,7 @@ func (p *plexConnector) authenticate() error { } p.token = token if p.token != "" { - config.FileSet(p.f.Name(), "plex_token", p.token) - config.SaveConfig() + p.saveToken(p.token) fs.Infof(p.f.Name(), "Connected to Plex server: %v", p.url.String()) } p.listenWebsocket() diff --git a/backend/cache/storage_persistent.go b/backend/cache/storage_persistent.go index 8bdfe51ad..57406bbd7 100644 --- a/backend/cache/storage_persistent.go +++ b/backend/cache/storage_persistent.go @@ -34,7 +34,8 @@ const ( // Features flags for this storage type type Features struct { - PurgeDb bool // purge the db before starting + PurgeDb bool // purge the db before starting + DbWaitTime time.Duration // time to wait for DB to be available } var boltMap = make(map[string]*Persistent) @@ -122,7 +123,7 @@ func (b *Persistent) connect() error { if err != nil { return errors.Wrapf(err, "failed to create a data directory %q", b.dataPath) } - b.db, err = bolt.Open(b.dbPath, 0644, &bolt.Options{Timeout: *cacheDbWaitTime}) + b.db, err = bolt.Open(b.dbPath, 0644, &bolt.Options{Timeout: b.features.DbWaitTime}) if err != nil { return 
errors.Wrapf(err, "failed to open a cache connection to %q", b.dbPath) } @@ -342,7 +343,7 @@ func (b *Persistent) RemoveDir(fp string) error { // ExpireDir will flush a CachedDirectory and all its objects from the objects // chunks will remain as they are func (b *Persistent) ExpireDir(cd *Directory) error { - t := time.Now().Add(cd.CacheFs.fileAge * -1) + t := time.Now().Add(time.Duration(-cd.CacheFs.opt.InfoAge)) cd.CacheTs = &t // expire all parents @@ -429,7 +430,7 @@ func (b *Persistent) RemoveObject(fp string) error { // ExpireObject will flush an Object and all its data if desired func (b *Persistent) ExpireObject(co *Object, withData bool) error { - co.CacheTs = time.Now().Add(co.CacheFs.fileAge * -1) + co.CacheTs = time.Now().Add(time.Duration(-co.CacheFs.opt.InfoAge)) err := b.AddObject(co) if withData { _ = os.RemoveAll(path.Join(b.dataPath, co.abs())) diff --git a/backend/crypt/crypt.go b/backend/crypt/crypt.go index 70d6218c0..53b10ff2f 100644 --- a/backend/crypt/crypt.go +++ b/backend/crypt/crypt.go @@ -5,24 +5,18 @@ import ( "fmt" "io" "path" - "strconv" "strings" "time" "github.com/ncw/rclone/fs" - "github.com/ncw/rclone/fs/config" - "github.com/ncw/rclone/fs/config/flags" + "github.com/ncw/rclone/fs/config/configmap" + "github.com/ncw/rclone/fs/config/configstruct" "github.com/ncw/rclone/fs/config/obscure" "github.com/ncw/rclone/fs/hash" "github.com/pkg/errors" ) // Globals -var ( - // Flags - cryptShowMapping = flags.BoolP("crypt-show-mapping", "", false, "For all files listed show how the names encrypt.") -) - // Register with Fs func init() { fs.Register(&fs.RegInfo{ @@ -30,11 +24,13 @@ func init() { Description: "Encrypt/Decrypt a remote", NewFs: NewFs, Options: []fs.Option{{ - Name: "remote", - Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).", + Name: "remote", + Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).", + Required: true, }, { - Name: "filename_encryption", - Help: "How to encrypt the filenames.", + Name: "filename_encryption", + Help: "How to encrypt the filenames.", + Default: "standard", Examples: []fs.OptionExample{ { Value: "off", @@ -48,8 +44,9 @@ func init() { }, }, }, { - Name: "directory_name_encryption", - Help: "Option to either encrypt directory names or leave them intact.", + Name: "directory_name_encryption", + Help: "Option to either encrypt directory names or leave them intact.", + Default: true, Examples: []fs.OptionExample{ { Value: "true", @@ -68,50 +65,67 @@ func init() { Name: "password2", Help: "Password or pass phrase for salt. 
Optional but recommended.\nShould be different to the previous password.", IsPassword: true, - Optional: true, + }, { + Name: "show_mapping", + Help: "For all files listed show how the names encrypt.", + Default: false, + Hide: fs.OptionHideConfigurator, + Advanced: true, }}, }) } -// NewCipher constructs a Cipher for the given config name -func NewCipher(name string) (Cipher, error) { - mode, err := NewNameEncryptionMode(config.FileGet(name, "filename_encryption", "standard")) +// newCipherForConfig constructs a Cipher for the given config name +func newCipherForConfig(opt *Options) (Cipher, error) { + mode, err := NewNameEncryptionMode(opt.FilenameEncryption) if err != nil { return nil, err } - dirNameEncrypt, err := strconv.ParseBool(config.FileGet(name, "directory_name_encryption", "true")) - if err != nil { - return nil, err - } - password := config.FileGet(name, "password", "") - if password == "" { + if opt.Password == "" { return nil, errors.New("password not set in config file") } - password, err = obscure.Reveal(password) + password, err := obscure.Reveal(opt.Password) if err != nil { return nil, errors.Wrap(err, "failed to decrypt password") } - salt := config.FileGet(name, "password2", "") - if salt != "" { - salt, err = obscure.Reveal(salt) + var salt string + if opt.Password2 != "" { + salt, err = obscure.Reveal(opt.Password2) if err != nil { return nil, errors.Wrap(err, "failed to decrypt password2") } } - cipher, err := newCipher(mode, password, salt, dirNameEncrypt) + cipher, err := newCipher(mode, password, salt, opt.DirectoryNameEncryption) if err != nil { return nil, errors.Wrap(err, "failed to make cipher") } return cipher, nil } -// NewFs contstructs an Fs from the path, container:path -func NewFs(name, rpath string) (fs.Fs, error) { - cipher, err := NewCipher(name) +// NewCipher constructs a Cipher for the given config +func NewCipher(m configmap.Mapper) (Cipher, error) { + // Parse config into Options struct + opt := new(Options) + err := configstruct.Set(m, opt) if err != nil { return nil, err } - remote := config.FileGet(name, "remote") + return newCipherForConfig(opt) +} + +// NewFs constructs an Fs from the path, container:path +func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) { + // Parse config into Options struct + opt := new(Options) + err := configstruct.Set(m, opt) + if err != nil { + return nil, err + } + cipher, err := newCipherForConfig(opt) + if err != nil { + return nil, err + } + remote := opt.Remote if strings.HasPrefix(remote, name+":") { return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting") } @@ -130,6 +144,7 @@ func NewFs(name, rpath string) (fs.Fs, error) { Fs: wrappedFs, name: name, root: rpath, + opt: *opt, cipher: cipher, } // the features here are ones we could support, and they are @@ -161,11 +176,22 @@ func NewFs(name, rpath string) (fs.Fs, error) { return f, err } +// Options defines the configuration for this backend +type Options struct { + Remote string `config:"remote"` + FilenameEncryption string `config:"filename_encryption"` + DirectoryNameEncryption bool `config:"directory_name_encryption"` + Password string `config:"password"` + Password2 string `config:"password2"` + ShowMapping bool `config:"show_mapping"` +} + // Fs represents a wrapped fs.Fs type Fs struct { fs.Fs name string root string + opt Options features *fs.Features // optional features cipher Cipher } @@ -198,7 +224,7 @@ func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) { fs.Debugf(remote,
"Skipping undecryptable file name: %v", err) return } - if *cryptShowMapping { + if f.opt.ShowMapping { fs.Logf(decryptedRemote, "Encrypts to %q", remote) } *entries = append(*entries, f.newObject(obj)) @@ -212,7 +238,7 @@ func (f *Fs) addDir(entries *fs.DirEntries, dir fs.Directory) { fs.Debugf(remote, "Skipping undecryptable dir name: %v", err) return } - if *cryptShowMapping { + if f.opt.ShowMapping { fs.Logf(decryptedRemote, "Encrypts to %q", remote) } *entries = append(*entries, f.newDir(dir)) diff --git a/backend/drive/drive.go b/backend/drive/drive.go index 6e3b5fe35..f4c53c24c 100644 --- a/backend/drive/drive.go +++ b/backend/drive/drive.go @@ -23,7 +23,8 @@ import ( "github.com/ncw/rclone/fs" "github.com/ncw/rclone/fs/config" - "github.com/ncw/rclone/fs/config/flags" + "github.com/ncw/rclone/fs/config/configmap" + "github.com/ncw/rclone/fs/config/configstruct" "github.com/ncw/rclone/fs/config/obscure" "github.com/ncw/rclone/fs/fserrors" "github.com/ncw/rclone/fs/fshttp" @@ -49,27 +50,13 @@ const ( defaultExtensions = "docx,xlsx,pptx,svg" scopePrefix = "https://www.googleapis.com/auth/" defaultScope = "drive" + // chunkSize is the size of the chunks created during a resumable upload and should be a power of two. + // 1<<18 is the minimum size supported by the Google uploader, and there is no maximum. + defaultChunkSize = fs.SizeSuffix(8 * 1024 * 1024) ) // Globals var ( - // Flags - driveAuthOwnerOnly = flags.BoolP("drive-auth-owner-only", "", false, "Only consider files owned by the authenticated user.") - driveUseTrash = flags.BoolP("drive-use-trash", "", true, "Send files to the trash instead of deleting permanently.") - driveSkipGdocs = flags.BoolP("drive-skip-gdocs", "", false, "Skip google documents in all listings.") - driveSharedWithMe = flags.BoolP("drive-shared-with-me", "", false, "Only show files that are shared with me") - driveTrashedOnly = flags.BoolP("drive-trashed-only", "", false, "Only show files that are in the trash") - driveExtensions = flags.StringP("drive-formats", "", defaultExtensions, "Comma separated list of preferred formats for downloading Google docs.") - driveUseCreatedDate = flags.BoolP("drive-use-created-date", "", false, "Use created date instead of modified date.") - driveListChunk = flags.Int64P("drive-list-chunk", "", 1000, "Size of listing chunk 100-1000. 0 to disable.") - driveImpersonate = flags.StringP("drive-impersonate", "", "", "Impersonate this user when using a service account.") - driveAlternateExport = flags.BoolP("drive-alternate-export", "", false, "Use alternate export URLs for google documents export.") - driveAcknowledgeAbuse = flags.BoolP("drive-acknowledge-abuse", "", false, "Set to allow files which return cannotDownloadAbusiveFile to be downloaded.") - driveKeepRevisionForever = flags.BoolP("drive-keep-revision-forever", "", false, "Keep new head revision forever.") - // chunkSize is the size of the chunks created during a resumable upload and should be a power of two. - // 1<<18 is the minimum size supported by the Google uploader, and there is no maximum. 
- chunkSize = fs.SizeSuffix(8 * 1024 * 1024) - driveUploadCutoff = chunkSize // Description of how to auth for this app driveConfig = &oauth2.Config{ Scopes: []string{scopePrefix + "drive"}, @@ -112,38 +99,43 @@ func init() { Name: "drive", Description: "Google Drive", NewFs: NewFs, - Config: func(name string) { - var err error + Config: func(name string, m configmap.Mapper) { + // Parse config into Options struct + opt := new(Options) + err := configstruct.Set(m, opt) + if err != nil { + fs.Errorf(nil, "Couldn't parse config into struct: %v", err) + return + } // Fill in the scopes - scope := config.FileGet(name, "scope") - if scope == "" { - scope = defaultScope + if opt.Scope == "" { + opt.Scope = defaultScope } driveConfig.Scopes = nil - for _, scope := range strings.Split(scope, ",") { + for _, scope := range strings.Split(opt.Scope, ",") { driveConfig.Scopes = append(driveConfig.Scopes, scopePrefix+strings.TrimSpace(scope)) // Set the root_folder_id if using drive.appfolder if scope == "drive.appfolder" { - config.FileSet(name, "root_folder_id", "appDataFolder") + m.Set("root_folder_id", "appDataFolder") } } - if config.FileGet(name, "service_account_file") == "" { - err = oauthutil.Config("drive", name, driveConfig) + if opt.ServiceAccountFile == "" { + err = oauthutil.Config("drive", name, m, driveConfig) if err != nil { log.Fatalf("Failed to configure token: %v", err) } } - err = configTeamDrive(name) + err = configTeamDrive(opt, m, name) if err != nil { log.Fatalf("Failed to configure team drive: %v", err) } }, Options: []fs.Option{{ Name: config.ConfigClientID, - Help: "Google Application Client Id - leave blank normally.", + Help: "Google Application Client Id\nLeave blank normally.", }, { Name: config.ConfigClientSecret, - Help: "Google Application Client Secret - leave blank normally.", + Help: "Google Application Client Secret\nLeave blank normally.", }, { Name: "scope", Help: "Scope that rclone should use when requesting access from drive.", @@ -165,14 +157,92 @@ func init() { }}, }, { Name: "root_folder_id", - Help: "ID of the root folder - leave blank normally. Fill in to access \"Computers\" folders. (see docs).", + Help: "ID of the root folder\nLeave blank normally.\nFill in to access \"Computers\" folders. 
(see docs).", }, { Name: "service_account_file", - Help: "Service Account Credentials JSON file path - leave blank normally.\nNeeded only if you want use SA instead of interactive login.", + Help: "Service Account Credentials JSON file path \nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.", + }, { + Name: "service_account_credentials", + Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.", + Hide: fs.OptionHideBoth, + Advanced: true, + }, { + Name: "team_drive", + Help: "ID of the Team Drive", + Hide: fs.OptionHideBoth, + Advanced: true, + }, { + Name: "auth_owner_only", + Default: false, + Help: "Only consider files owned by the authenticated user.", + Advanced: true, + }, { + Name: "use_trash", + Default: true, + Help: "Send files to the trash instead of deleting permanently.", + Advanced: true, + }, { + Name: "skip_gdocs", + Default: false, + Help: "Skip google documents in all listings.", + Advanced: true, + }, { + Name: "shared_with_me", + Default: false, + Help: "Only show files that are shared with me", + Advanced: true, + }, { + Name: "trashed_only", + Default: false, + Help: "Only show files that are in the trash", + Advanced: true, + }, { + Name: "formats", + Default: defaultExtensions, + Help: "Comma separated list of preferred formats for downloading Google docs.", + Advanced: true, + }, { + Name: "use_created_date", + Default: false, + Help: "Use created date instead of modified date.", + Advanced: true, + }, { + Name: "list_chunk", + Default: 1000, + Help: "Size of listing chunk 100-1000. 0 to disable.", + Advanced: true, + }, { + Name: "impersonate", + Default: "", + Help: "Impersonate this user when using a service account.", + Advanced: true, + }, { + Name: "alternate_export", + Default: false, + Help: "Use alternate export URLs for google documents export.", + Advanced: true, + }, { + Name: "upload_cutoff", + Default: defaultChunkSize, + Help: "Cutoff for switching to chunked upload", + Advanced: true, + }, { + Name: "chunk_size", + Default: defaultChunkSize, + Help: "Upload chunk size. Must a power of 2 >= 256k.", + Advanced: true, + }, { + Name: "acknowledge_abuse", + Default: false, + Help: "Set to allow files which return cannotDownloadAbusiveFile to be downloaded.", + Advanced: true, + }, { + Name: "keep_revision_forever", + Default: false, + Help: "Keep new head revision forever.", + Advanced: true, }}, }) - flags.VarP(&driveUploadCutoff, "drive-upload-cutoff", "", "Cutoff for switching to chunked upload") - flags.VarP(&chunkSize, "drive-chunk-size", "", "Upload chunk size. 
Must a power of 2 >= 256k.") // Invert mimeTypeToExtension extensionToMimeType = make(map[string]string, len(mimeTypeToExtension)) @@ -181,10 +251,34 @@ func init() { } } +// Options defines the configuration for this backend +type Options struct { + Scope string `config:"scope"` + RootFolderID string `config:"root_folder_id"` + ServiceAccountFile string `config:"service_account_file"` + ServiceAccountCredentials string `config:"service_account_credentials"` + TeamDriveID string `config:"team_drive"` + AuthOwnerOnly bool `config:"auth_owner_only"` + UseTrash bool `config:"use_trash"` + SkipGdocs bool `config:"skip_gdocs"` + SharedWithMe bool `config:"shared_with_me"` + TrashedOnly bool `config:"trashed_only"` + Extensions string `config:"formats"` + UseCreatedDate bool `config:"use_created_date"` + ListChunk int64 `config:"list_chunk"` + Impersonate string `config:"impersonate"` + AlternateExport bool `config:"alternate_export"` + UploadCutoff fs.SizeSuffix `config:"upload_cutoff"` + ChunkSize fs.SizeSuffix `config:"chunk_size"` + AcknowledgeAbuse bool `config:"acknowledge_abuse"` + KeepRevisionForever bool `config:"keep_revision_forever"` +} + // Fs represents a remote drive server type Fs struct { name string // name of this remote root string // the path we are working on + opt Options // parsed options features *fs.Features // optional features svc *drive.Service // the connection to the drive server client *http.Client // authorized client @@ -192,7 +286,6 @@ type Fs struct { dirCache *dircache.DirCache // Map of directory path to directory id pacer *pacer.Pacer // To pace the API calls extensions []string // preferred extensions to download docs - teamDriveID string // team drive ID, may be "" isTeamDrive bool // true if this is a team drive } @@ -274,8 +367,8 @@ type listFn func(*drive.File) bool func (f *Fs) list(dirID string, title string, directoriesOnly bool, filesOnly bool, includeAll bool, fn listFn) (found bool, err error) { var query []string if !includeAll { - q := "trashed=" + strconv.FormatBool(*driveTrashedOnly) - if *driveTrashedOnly { + q := "trashed=" + strconv.FormatBool(f.opt.TrashedOnly) + if f.opt.TrashedOnly { q = fmt.Sprintf("(mimeType='%s' or %s)", driveFolderType, q) } query = append(query, q) @@ -283,10 +376,10 @@ func (f *Fs) list(dirID string, title string, directoriesOnly bool, filesOnly bo // Search with sharedWithMe will always return things listed in "Shared With Me" (without any parents) // We must not filter with parent when we try list "ROOT" with drive-shared-with-me // If we need to list file inside those shared folders, we must search it without sharedWithMe - if *driveSharedWithMe && dirID == f.rootFolderID { + if f.opt.SharedWithMe && dirID == f.rootFolderID { query = append(query, "sharedWithMe=true") } - if dirID != "" && !(*driveSharedWithMe && dirID == f.rootFolderID) { + if dirID != "" && !(f.opt.SharedWithMe && dirID == f.rootFolderID) { query = append(query, fmt.Sprintf("'%s' in parents", dirID)) } if title != "" { @@ -308,11 +401,11 @@ func (f *Fs) list(dirID string, title string, directoriesOnly bool, filesOnly bo list.Q(strings.Join(query, " and ")) // fmt.Printf("list Query = %q\n", query) } - if *driveListChunk > 0 { - list.PageSize(*driveListChunk) + if f.opt.ListChunk > 0 { + list.PageSize(f.opt.ListChunk) } if f.isTeamDrive { - list.TeamDriveId(f.teamDriveID) + list.TeamDriveId(f.opt.TeamDriveID) list.SupportsTeamDrives(true) list.IncludeTeamDriveItems(true) list.Corpora("teamDrive") @@ -324,7 +417,7 @@ func (f *Fs) list(dirID 
string, title string, directoriesOnly bool, filesOnly bo var fields = partialFields - if *driveAuthOwnerOnly { + if f.opt.AuthOwnerOnly { fields += ",owners" } @@ -395,17 +488,16 @@ func (f *Fs) parseExtensions(extensions string) error { } // Figure out if the user wants to use a team drive -func configTeamDrive(name string) error { - teamDrive := config.FileGet(name, "team_drive") - if teamDrive == "" { +func configTeamDrive(opt *Options, m configmap.Mapper, name string) error { + if opt.TeamDriveID == "" { fmt.Printf("Configure this as a team drive?\n") } else { - fmt.Printf("Change current team drive ID %q?\n", teamDrive) + fmt.Printf("Change current team drive ID %q?\n", opt.TeamDriveID) } if !config.ConfirmWithDefault(false) { return nil } - client, err := createOAuthClient(name) + client, err := createOAuthClient(opt, name, m) if err != nil { return errors.Wrap(err, "config team drive failed to create oauth client") } @@ -440,7 +532,8 @@ func configTeamDrive(name string) error { } else { driveID = config.Choose("Enter a Team Drive ID", driveIDs, driveNames, true) } - config.FileSet(name, "team_drive", driveID) + m.Set("team_drive", driveID) + opt.TeamDriveID = driveID return nil } @@ -449,39 +542,37 @@ func newPacer() *pacer.Pacer { return pacer.New().SetMinSleep(minSleep).SetPacer(pacer.GoogleDrivePacer) } -func getServiceAccountClient(credentialsData []byte) (*http.Client, error) { +func getServiceAccountClient(opt *Options, credentialsData []byte) (*http.Client, error) { conf, err := google.JWTConfigFromJSON(credentialsData, driveConfig.Scopes...) if err != nil { return nil, errors.Wrap(err, "error processing credentials") } - if *driveImpersonate != "" { - conf.Subject = *driveImpersonate + if opt.Impersonate != "" { + conf.Subject = opt.Impersonate } ctxWithSpecialClient := oauthutil.Context(fshttp.NewClient(fs.Config)) return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil } -func createOAuthClient(name string) (*http.Client, error) { +func createOAuthClient(opt *Options, name string, m configmap.Mapper) (*http.Client, error) { var oAuthClient *http.Client var err error // try loading service account credentials from env variable, then from a file - serviceAccountCreds := []byte(config.FileGet(name, "service_account_credentials")) - serviceAccountPath := config.FileGet(name, "service_account_file") - if len(serviceAccountCreds) == 0 && serviceAccountPath != "" { - loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(serviceAccountPath)) + if len(opt.ServiceAccountCredentials) == 0 && opt.ServiceAccountFile != "" { + loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(opt.ServiceAccountFile)) if err != nil { return nil, errors.Wrap(err, "error opening service account credentials file") } - serviceAccountCreds = loadedCreds + opt.ServiceAccountCredentials = string(loadedCreds) } - if len(serviceAccountCreds) > 0 { - oAuthClient, err = getServiceAccountClient(serviceAccountCreds) + if opt.ServiceAccountCredentials != "" { + oAuthClient, err = getServiceAccountClient(opt, []byte(opt.ServiceAccountCredentials)) if err != nil { return nil, errors.Wrap(err, "failed to create oauth client from service account") } } else { - oAuthClient, _, err = oauthutil.NewClient(name, driveConfig) + oAuthClient, _, err = oauthutil.NewClient(name, m, driveConfig) if err != nil { return nil, errors.Wrap(err, "failed to create oauth client") } @@ -491,15 +582,21 @@ func createOAuthClient(name string) (*http.Client, error) { } // NewFs contstructs an Fs from the path, 
container:path -func NewFs(name, path string) (fs.Fs, error) { - if !isPowerOfTwo(int64(chunkSize)) { - return nil, errors.Errorf("drive: chunk size %v isn't a power of two", chunkSize) +func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) { + // Parse config into Options struct + opt := new(Options) + err := configstruct.Set(m, opt) + if err != nil { + return nil, err } - if chunkSize < 256*1024 { - return nil, errors.Errorf("drive: chunk size can't be less than 256k - was %v", chunkSize) + if !isPowerOfTwo(int64(opt.ChunkSize)) { + return nil, errors.Errorf("drive: chunk size %v isn't a power of two", opt.ChunkSize) + } + if opt.ChunkSize < 256*1024 { + return nil, errors.Errorf("drive: chunk size can't be less than 256k - was %v", opt.ChunkSize) } - oAuthClient, err := createOAuthClient(name) + oAuthClient, err := createOAuthClient(opt, name, m) if err != nil { return nil, errors.Wrap(err, "drive: failed when making oauth client") } @@ -512,10 +609,10 @@ func NewFs(name, path string) (fs.Fs, error) { f := &Fs{ name: name, root: root, + opt: *opt, pacer: newPacer(), } - f.teamDriveID = config.FileGet(name, "team_drive") - f.isTeamDrive = f.teamDriveID != "" + f.isTeamDrive = opt.TeamDriveID != "" f.features = (&fs.Features{ DuplicateFiles: true, ReadMimeType: true, @@ -532,20 +629,20 @@ func NewFs(name, path string) (fs.Fs, error) { // set root folder for a team drive or query the user root folder if f.isTeamDrive { - f.rootFolderID = f.teamDriveID + f.rootFolderID = f.opt.TeamDriveID } else { f.rootFolderID = "root" } // override root folder if set in the config - if rootID := config.FileGet(name, "root_folder_id"); rootID != "" { - f.rootFolderID = rootID + if opt.RootFolderID != "" { + f.rootFolderID = opt.RootFolderID } f.dirCache = dircache.New(root, f.rootFolderID, f) // Parse extensions - err = f.parseExtensions(*driveExtensions) + err = f.parseExtensions(opt.Extensions) if err != nil { return nil, err } @@ -729,7 +826,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { when, _ := time.Parse(timeFormatIn, item.ModifiedTime) d := fs.NewDir(remote, when).SetID(item.Id) entries = append(entries, d) - case *driveAuthOwnerOnly && !isAuthOwned(item): + case f.opt.AuthOwnerOnly && !isAuthOwned(item): // ignore object case item.Md5Checksum != "" || item.Size > 0: // If item has MD5 sum or a length it is a file stored on drive @@ -739,7 +836,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { return true } entries = append(entries, o) - case *driveSkipGdocs: + case f.opt.SkipGdocs: fs.Debugf(remote, "Skipping google document type %q", item.MimeType) default: exportMimeTypes, isDocument := f.exportFormats()[item.MimeType] @@ -760,7 +857,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { } obj := o.(*Object) obj.url = fmt.Sprintf("%sfiles/%s/export?mimeType=%s", f.svc.BasePath, item.Id, url.QueryEscape(exportMimeType)) - if *driveAlternateExport { + if f.opt.AlternateExport { switch item.MimeType { case "application/vnd.google-apps.drawing": obj.url = fmt.Sprintf("https://docs.google.com/drawings/d/%s/export/%s", item.Id, extension) @@ -854,11 +951,11 @@ func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOpt } var info *drive.File - if size == 0 || size < int64(driveUploadCutoff) { + if size == 0 || size < int64(f.opt.UploadCutoff) { // Make the API request to upload metadata and file data. 
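// The Options/configstruct pattern above is the core of this change: each
// backend declares an Options struct whose `config:"..."` tags name the
// config keys, and configstruct.Set resolves every tagged field against the
// configmap.Mapper, converting the stored string to the field's type
// (fs.SizeSuffix, bool, int, ...). A minimal sketch with hypothetical
// values, using the configmap.Simple map type that appears in the http
// backend tests further down:
//
//	opt := new(Options)
//	m := configmap.Simple{
//		"chunk_size": "8M",   // becomes opt.ChunkSize (fs.SizeSuffix)
//		"use_trash":  "true", // becomes opt.UseTrash (bool)
//	}
//	if err := configstruct.Set(m, opt); err != nil {
//		return nil, err
//	}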
// Don't retry, return a retry error instead err = f.pacer.CallNoRetry(func() (bool, error) { - info, err = f.svc.Files.Create(createInfo).Media(in, googleapi.ContentType("")).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(f.isTeamDrive).KeepRevisionForever(*driveKeepRevisionForever).Do() + info, err = f.svc.Files.Create(createInfo).Media(in, googleapi.ContentType("")).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(f.isTeamDrive).KeepRevisionForever(f.opt.KeepRevisionForever).Do() return shouldRetry(err) }) if err != nil { @@ -972,7 +1069,7 @@ func (f *Fs) Rmdir(dir string) error { // trash the directory if it had trashed files // in or the user wants to trash, otherwise // delete it. - err = f.rmdir(directoryID, trashedFiles || *driveUseTrash) + err = f.rmdir(directoryID, trashedFiles || f.opt.UseTrash) if err != nil { return err } @@ -1015,7 +1112,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { var info *drive.File err = o.fs.pacer.Call(func() (bool, error) { - info, err = o.fs.svc.Files.Copy(srcObj.id, createInfo).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(f.isTeamDrive).KeepRevisionForever(*driveKeepRevisionForever).Do() + info, err = o.fs.svc.Files.Copy(srcObj.id, createInfo).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(f.isTeamDrive).KeepRevisionForever(f.opt.KeepRevisionForever).Do() return shouldRetry(err) }) if err != nil { @@ -1040,7 +1137,7 @@ func (f *Fs) Purge() error { return err } err = f.pacer.Call(func() (bool, error) { - if *driveUseTrash { + if f.opt.UseTrash { info := drive.File{ Trashed: true, } @@ -1316,11 +1413,11 @@ func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), pollInter err = f.pacer.Call(func() (bool, error) { changesCall := f.svc.Changes.List(pageToken).Fields("nextPageToken,newStartPageToken,changes(fileId,file(name,parents,mimeType))") - if *driveListChunk > 0 { - changesCall.PageSize(*driveListChunk) + if f.opt.ListChunk > 0 { + changesCall.PageSize(f.opt.ListChunk) } if f.isTeamDrive { - changesCall.TeamDriveId(f.teamDriveID) + changesCall.TeamDriveId(f.opt.TeamDriveID) changesCall.SupportsTeamDrives(true) changesCall.IncludeTeamDriveItems(true) } @@ -1444,7 +1541,7 @@ func (o *Object) setMetaData(info *drive.File) { o.url = fmt.Sprintf("%sfiles/%s?alt=media", o.fs.svc.BasePath, info.Id) o.md5sum = strings.ToLower(info.Md5Checksum) o.bytes = info.Size - if *driveUseCreatedDate { + if o.fs.opt.UseCreatedDate { o.modifiedDate = info.CreatedTime } else { o.modifiedDate = info.ModifiedTime @@ -1617,7 +1714,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { _, res, err := o.httpResponse("GET", options) if err != nil { if isGoogleError(err, "cannotDownloadAbusiveFile") { - if *driveAcknowledgeAbuse { + if o.fs.opt.AcknowledgeAbuse { // Retry acknowledging abuse if strings.ContainsRune(o.url, '?') { o.url += "&" @@ -1663,10 +1760,10 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio // Make the API request to upload metadata and file data. 
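// The drive call sites above all share one retry idiom: the API call is
// wrapped in a closure handed to the pacer, and shouldRetry classifies the
// error. pacer.Call retries transient failures itself; CallNoRetry (used
// for the uploads above, whose body reader cannot be rewound) paces the
// call but surfaces a retry error to the caller instead. A sketch with an
// illustrative Files.Get call:
//
//	err = f.pacer.Call(func() (bool, error) {
//		info, err = f.svc.Files.Get(id).Do()
//		return shouldRetry(err)
//	})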
var err error var info *drive.File - if size == 0 || size < int64(driveUploadCutoff) { + if size == 0 || size < int64(o.fs.opt.UploadCutoff) { // Don't retry, return a retry error instead err = o.fs.pacer.CallNoRetry(func() (bool, error) { - info, err = o.fs.svc.Files.Update(o.id, updateInfo).Media(in, googleapi.ContentType("")).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(o.fs.isTeamDrive).KeepRevisionForever(*driveKeepRevisionForever).Do() + info, err = o.fs.svc.Files.Update(o.id, updateInfo).Media(in, googleapi.ContentType("")).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(o.fs.isTeamDrive).KeepRevisionForever(o.fs.opt.KeepRevisionForever).Do() return shouldRetry(err) }) if err != nil { @@ -1690,7 +1787,7 @@ func (o *Object) Remove() error { } var err error err = o.fs.pacer.Call(func() (bool, error) { - if *driveUseTrash { + if o.fs.opt.UseTrash { info := drive.File{ Trashed: true, } diff --git a/backend/drive/upload.go b/backend/drive/upload.go index 281ffe855..350637dda 100644 --- a/backend/drive/upload.go +++ b/backend/drive/upload.go @@ -58,7 +58,7 @@ func (f *Fs) Upload(in io.Reader, size int64, contentType string, fileID string, if f.isTeamDrive { params.Set("supportsTeamDrives", "true") } - if *driveKeepRevisionForever { + if f.opt.KeepRevisionForever { params.Set("keepRevisionForever", "true") } urls := "https://www.googleapis.com/upload/drive/v3/files" @@ -197,11 +197,11 @@ func (rx *resumableUpload) Upload() (*drive.File, error) { start := int64(0) var StatusCode int var err error - buf := make([]byte, int(chunkSize)) + buf := make([]byte, int(rx.f.opt.ChunkSize)) for start < rx.ContentLength { reqSize := rx.ContentLength - start - if reqSize >= int64(chunkSize) { - reqSize = int64(chunkSize) + if reqSize >= int64(rx.f.opt.ChunkSize) { + reqSize = int64(rx.f.opt.ChunkSize) } chunk := readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize) diff --git a/backend/dropbox/dropbox.go b/backend/dropbox/dropbox.go index 7db639b23..87686bb3b 100644 --- a/backend/dropbox/dropbox.go +++ b/backend/dropbox/dropbox.go @@ -37,7 +37,8 @@ import ( "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users" "github.com/ncw/rclone/fs" "github.com/ncw/rclone/fs/config" - "github.com/ncw/rclone/fs/config/flags" + "github.com/ncw/rclone/fs/config/configmap" + "github.com/ncw/rclone/fs/config/configstruct" "github.com/ncw/rclone/fs/config/obscure" "github.com/ncw/rclone/fs/fserrors" "github.com/ncw/rclone/fs/hash" @@ -55,24 +56,6 @@ const ( minSleep = 10 * time.Millisecond maxSleep = 2 * time.Second decayConstant = 2 // bigger for slower decay, exponential -) - -var ( - // Description of how to auth for this app - dropboxConfig = &oauth2.Config{ - Scopes: []string{}, - // Endpoint: oauth2.Endpoint{ - // AuthURL: "https://www.dropbox.com/1/oauth2/authorize", - // TokenURL: "https://api.dropboxapi.com/1/oauth2/token", - // }, - Endpoint: dropbox.OAuthEndpoint(""), - ClientID: rcloneClientID, - ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret), - RedirectURL: oauthutil.RedirectLocalhostURL, - } - // A regexp matching path names for files Dropbox ignores - // See https://www.dropbox.com/en/help/145 - Ignored files - ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r|\.dropbox|\.dropbox.attr)$`) // Upload chunk size - setting too small makes uploads slow. // Chunks are buffered into memory for retries. // @@ -96,8 +79,26 @@ var ( // Choose 48MB which is 91% of Maximum speed. 
rclone by // default does 4 transfers so this should use 4*48MB = 192MB // by default. - uploadChunkSize = fs.SizeSuffix(48 * 1024 * 1024) - maxUploadChunkSize = fs.SizeSuffix(150 * 1024 * 1024) + defaultChunkSize = 48 * 1024 * 1024 + maxChunkSize = 150 * 1024 * 1024 +) + +var ( + // Description of how to auth for this app + dropboxConfig = &oauth2.Config{ + Scopes: []string{}, + // Endpoint: oauth2.Endpoint{ + // AuthURL: "https://www.dropbox.com/1/oauth2/authorize", + // TokenURL: "https://api.dropboxapi.com/1/oauth2/token", + // }, + Endpoint: dropbox.OAuthEndpoint(""), + ClientID: rcloneClientID, + ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret), + RedirectURL: oauthutil.RedirectLocalhostURL, + } + // A regexp matching path names for files Dropbox ignores + // See https://www.dropbox.com/en/help/145 - Ignored files + ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r|\.dropbox|\.dropbox.attr)$`) ) // Register with Fs @@ -106,27 +107,37 @@ func init() { Name: "dropbox", Description: "Dropbox", NewFs: NewFs, - Config: func(name string) { - err := oauthutil.ConfigNoOffline("dropbox", name, dropboxConfig) + Config: func(name string, m configmap.Mapper) { + err := oauthutil.ConfigNoOffline("dropbox", name, m, dropboxConfig) if err != nil { log.Fatalf("Failed to configure token: %v", err) } }, Options: []fs.Option{{ Name: config.ConfigClientID, - Help: "Dropbox App Client Id - leave blank normally.", + Help: "Dropbox App Client Id\nLeave blank normally.", }, { Name: config.ConfigClientSecret, - Help: "Dropbox App Client Secret - leave blank normally.", + Help: "Dropbox App Client Secret\nLeave blank normally.", + }, { + Name: "chunk_size", + Help: fmt.Sprintf("Upload chunk size. Max %v.", fs.SizeSuffix(maxChunkSize)), + Default: fs.SizeSuffix(defaultChunkSize), + Advanced: true, }}, }) - flags.VarP(&uploadChunkSize, "dropbox-chunk-size", "", fmt.Sprintf("Upload chunk size. Max %v.", maxUploadChunkSize)) +} + +// Options defines the configuration for this backend +type Options struct { + ChunkSize fs.SizeSuffix `config:"chunk_size"` } // Fs represents a remote dropbox server type Fs struct { name string // name of this remote root string // the path we are working on + opt Options // parsed options features *fs.Features // optional features srv files.Client // the connection to the dropbox server sharing sharing.Client // as above, but for generating sharing links @@ -185,15 +196,22 @@ func shouldRetry(err error) (bool, error) { } // NewFs contstructs an Fs from the path, container:path -func NewFs(name, root string) (fs.Fs, error) { - if uploadChunkSize > maxUploadChunkSize { - return nil, errors.Errorf("chunk size too big, must be < %v", maxUploadChunkSize) +func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { + // Parse config into Options struct + opt := new(Options) + err := configstruct.Set(m, opt) + if err != nil { + return nil, err + } + if opt.ChunkSize > maxChunkSize { + return nil, errors.Errorf("chunk size too big, must be < %v", maxChunkSize) } // Convert the old token if it exists. 
The old token was just // just a string, the new one is a JSON blob - oldToken := strings.TrimSpace(config.FileGet(name, config.ConfigToken)) - if oldToken != "" && oldToken[0] != '{' { + oldToken, ok := m.Get(config.ConfigToken) + oldToken = strings.TrimSpace(oldToken) + if ok && oldToken != "" && oldToken[0] != '{' { fs.Infof(name, "Converting token to new format") newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken) err := config.SetValueAndSave(name, config.ConfigToken, newToken) @@ -202,13 +220,14 @@ func NewFs(name, root string) (fs.Fs, error) { } } - oAuthClient, _, err := oauthutil.NewClient(name, dropboxConfig) + oAuthClient, _, err := oauthutil.NewClient(name, m, dropboxConfig) if err != nil { return nil, errors.Wrap(err, "failed to configure dropbox") } f := &Fs{ name: name, + opt: *opt, pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant), } config := dropbox.Config{ @@ -911,7 +930,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { // unknown (i.e. -1) or smaller than uploadChunkSize, the method incurs an // avoidable request to the Dropbox API that does not carry payload. func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size int64) (entry *files.FileMetadata, err error) { - chunkSize := int64(uploadChunkSize) + chunkSize := int64(o.fs.opt.ChunkSize) chunks := 0 if size != -1 { chunks = int(size/chunkSize) + 1 @@ -1026,7 +1045,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio size := src.Size() var err error var entry *files.FileMetadata - if size > int64(uploadChunkSize) || size == -1 { + if size > int64(o.fs.opt.ChunkSize) || size == -1 { entry, err = o.uploadChunked(in, commitInfo, size) } else { err = o.fs.pacer.CallNoRetry(func() (bool, error) { diff --git a/backend/ftp/ftp.go b/backend/ftp/ftp.go index 1066fd0b4..21b7f5e71 100644 --- a/backend/ftp/ftp.go +++ b/backend/ftp/ftp.go @@ -4,16 +4,15 @@ package ftp import ( "io" "net/textproto" - "net/url" "os" "path" - "strings" "sync" "time" "github.com/jlaffaye/ftp" "github.com/ncw/rclone/fs" - "github.com/ncw/rclone/fs/config" + "github.com/ncw/rclone/fs/config/configmap" + "github.com/ncw/rclone/fs/config/configstruct" "github.com/ncw/rclone/fs/config/obscure" "github.com/ncw/rclone/fs/hash" "github.com/ncw/rclone/lib/readers" @@ -30,33 +29,40 @@ func init() { { Name: "host", Help: "FTP host to connect to", - Optional: false, + Required: true, Examples: []fs.OptionExample{{ Value: "ftp.example.com", Help: "Connect to ftp.example.com", }}, }, { - Name: "user", - Help: "FTP username, leave blank for current username, " + os.Getenv("USER"), - Optional: true, + Name: "user", + Help: "FTP username, leave blank for current username, " + os.Getenv("USER"), }, { - Name: "port", - Help: "FTP port, leave blank to use default (21) ", - Optional: true, + Name: "port", + Help: "FTP port, leave blank to use default (21)", }, { Name: "pass", Help: "FTP password", IsPassword: true, - Optional: false, + Required: true, }, }, }) } +// Options defines the configuration for this backend +type Options struct { + Host string `config:"host"` + User string `config:"user"` + Pass string `config:"pass"` + Port string `config:"port"` +} + // Fs represents a remote FTP server type Fs struct { name string // name of this remote root string // the path we are working on if any + opt Options // parsed options features *fs.Features // optional features url 
string user string @@ -161,51 +167,33 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) { } // NewFs contstructs an Fs from the path, container:path -func NewFs(name, root string) (ff fs.Fs, err error) { +func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) { // defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err) - // FIXME Convert the old scheme used for the first beta - remove after release - if ftpURL := config.FileGet(name, "url"); ftpURL != "" { - fs.Infof(name, "Converting old configuration") - u, err := url.Parse(ftpURL) - if err != nil { - return nil, errors.Wrapf(err, "Failed to parse old url %q", ftpURL) - } - parts := strings.Split(u.Host, ":") - config.FileSet(name, "host", parts[0]) - if len(parts) > 1 { - config.FileSet(name, "port", parts[1]) - } - config.FileSet(name, "host", u.Host) - config.FileSet(name, "user", config.FileGet(name, "username")) - config.FileSet(name, "pass", config.FileGet(name, "password")) - config.FileDeleteKey(name, "username") - config.FileDeleteKey(name, "password") - config.FileDeleteKey(name, "url") - config.SaveConfig() - if u.Path != "" && u.Path != "/" { - fs.Errorf(name, "Path %q in FTP URL no longer supported - put it on the end of the remote %s:%s", u.Path, name, u.Path) - } + // Parse config into Options struct + opt := new(Options) + err = configstruct.Set(m, opt) + if err != nil { + return nil, err } - host := config.FileGet(name, "host") - user := config.FileGet(name, "user") - pass := config.FileGet(name, "pass") - port := config.FileGet(name, "port") - pass, err = obscure.Reveal(pass) + pass, err := obscure.Reveal(opt.Pass) if err != nil { return nil, errors.Wrap(err, "NewFS decrypt password") } + user := opt.User if user == "" { user = os.Getenv("USER") } + port := opt.Port if port == "" { port = "21" } - dialAddr := host + ":" + port + dialAddr := opt.Host + ":" + port u := "ftp://" + path.Join(dialAddr+"/", root) f := &Fs{ name: name, root: root, + opt: *opt, url: u, user: user, pass: pass, diff --git a/backend/googlecloudstorage/googlecloudstorage.go b/backend/googlecloudstorage/googlecloudstorage.go index bd454fa26..1d74bccaf 100644 --- a/backend/googlecloudstorage/googlecloudstorage.go +++ b/backend/googlecloudstorage/googlecloudstorage.go @@ -29,7 +29,8 @@ import ( "github.com/ncw/rclone/fs" "github.com/ncw/rclone/fs/config" - "github.com/ncw/rclone/fs/config/flags" + "github.com/ncw/rclone/fs/config/configmap" + "github.com/ncw/rclone/fs/config/configstruct" "github.com/ncw/rclone/fs/config/obscure" "github.com/ncw/rclone/fs/fserrors" "github.com/ncw/rclone/fs/fshttp" @@ -55,8 +56,6 @@ const ( ) var ( - gcsLocation = flags.StringP("gcs-location", "", "", "Default location for buckets (us|eu|asia|us-central1|us-east1|us-east4|us-west1|asia-east1|asia-noetheast1|asia-southeast1|australia-southeast1|europe-west1|europe-west2).") - gcsStorageClass = flags.StringP("gcs-storage-class", "", "", "Default storage class for buckets (MULTI_REGIONAL|REGIONAL|STANDARD|NEARLINE|COLDLINE|DURABLE_REDUCED_AVAILABILITY).") // Description of how to auth for this app storageConfig = &oauth2.Config{ Scopes: []string{storage.DevstorageFullControlScope}, @@ -71,29 +70,36 @@ var ( func init() { fs.Register(&fs.RegInfo{ Name: "google cloud storage", + Prefix: "gcs", Description: "Google Cloud Storage (this is not Google Drive)", NewFs: NewFs, - Config: func(name string) { - if config.FileGet(name, "service_account_file") != "" { + Config: func(name string, m configmap.Mapper) { + saFile, _ 
:= m.Get("service_account_file") + saCreds, _ := m.Get("service_account_credentials") + if saFile != "" || saCreds != "" { return } - err := oauthutil.Config("google cloud storage", name, storageConfig) + err := oauthutil.Config("google cloud storage", name, m, storageConfig) if err != nil { log.Fatalf("Failed to configure token: %v", err) } }, Options: []fs.Option{{ Name: config.ConfigClientID, - Help: "Google Application Client Id - leave blank normally.", + Help: "Google Application Client Id\nLeave blank normally.", }, { Name: config.ConfigClientSecret, - Help: "Google Application Client Secret - leave blank normally.", + Help: "Google Application Client Secret\nLeave blank normally.", }, { Name: "project_number", - Help: "Project number optional - needed only for list/create/delete buckets - see your developer console.", + Help: "Project number.\nOptional - needed only for list/create/delete buckets - see your developer console.", }, { Name: "service_account_file", - Help: "Service Account Credentials JSON file path - needed only if you want use SA instead of interactive login.", + Help: "Service Account Credentials JSON file path\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.", + }, { + Name: "service_account_credentials", + Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.", + Hide: fs.OptionHideBoth, }, { Name: "object_acl", Help: "Access Control List for new objects.", @@ -207,22 +213,29 @@ func init() { }) } +// Options defines the configuration for this backend +type Options struct { + ProjectNumber string `config:"project_number"` + ServiceAccountFile string `config:"service_account_file"` + ServiceAccountCredentials string `config:"service_account_credentials"` + ObjectACL string `config:"object_acl"` + BucketACL string `config:"bucket_acl"` + Location string `config:"location"` + StorageClass string `config:"storage_class"` +} + // Fs represents a remote storage server type Fs struct { - name string // name of this remote - root string // the path we are working on if any - features *fs.Features // optional features - svc *storage.Service // the connection to the storage server - client *http.Client // authorized client - bucket string // the bucket we are working on - bucketOKMu sync.Mutex // mutex to protect bucket OK - bucketOK bool // true if we have created the bucket - projectNumber string // used for finding buckets - objectACL string // used when creating new objects - bucketACL string // used when creating new buckets - location string // location of new buckets - storageClass string // storage class of new buckets - pacer *pacer.Pacer // To pace the API calls + name string // name of this remote + root string // the path we are working on if any + opt Options // parsed options + features *fs.Features // optional features + svc *storage.Service // the connection to the storage server + client *http.Client // authorized client + bucket string // the bucket we are working on + bucketOKMu sync.Mutex // mutex to protect bucket OK + bucketOK bool // true if we have created the bucket + pacer *pacer.Pacer // To pace the API calls } // Object describes a storage object @@ -315,27 +328,37 @@ func getServiceAccountClient(credentialsData []byte) (*http.Client, error) { } // NewFs contstructs an Fs from the path, bucket:path -func NewFs(name, root string) (fs.Fs, error) { +func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { var oAuthClient 
*http.Client
-	var err error
+
+	// Parse config into Options struct
+	opt := new(Options)
+	err := configstruct.Set(m, opt)
+	if err != nil {
+		return nil, err
+	}
+	if opt.ObjectACL == "" {
+		opt.ObjectACL = "private"
+	}
+	if opt.BucketACL == "" {
+		opt.BucketACL = "private"
+	}
 
 	// try loading service account credentials from env variable, then from a file
-	serviceAccountCreds := []byte(config.FileGet(name, "service_account_credentials"))
-	serviceAccountPath := config.FileGet(name, "service_account_file")
-	if len(serviceAccountCreds) == 0 && serviceAccountPath != "" {
-		loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(serviceAccountPath))
+	if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
+		loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(opt.ServiceAccountFile))
 		if err != nil {
 			return nil, errors.Wrap(err, "error opening service account credentials file")
 		}
-		serviceAccountCreds = loadedCreds
+		opt.ServiceAccountCredentials = string(loadedCreds)
 	}
-	if len(serviceAccountCreds) > 0 {
-		oAuthClient, err = getServiceAccountClient(serviceAccountCreds)
+	if opt.ServiceAccountCredentials != "" {
+		oAuthClient, err = getServiceAccountClient([]byte(opt.ServiceAccountCredentials))
 		if err != nil {
 			return nil, errors.Wrap(err, "failed configuring Google Cloud Storage Service Account")
 		}
 	} else {
-		oAuthClient, _, err = oauthutil.NewClient(name, storageConfig)
+		oAuthClient, _, err = oauthutil.NewClient(name, m, storageConfig)
 		if err != nil {
 			return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
 		}
@@ -347,33 +370,17 @@ func NewFs(name, root string) (fs.Fs, error) {
 	}
 
 	f := &Fs{
-		name:          name,
-		bucket:        bucket,
-		root:          directory,
-		projectNumber: config.FileGet(name, "project_number"),
-		objectACL:     config.FileGet(name, "object_acl"),
-		bucketACL:     config.FileGet(name, "bucket_acl"),
-		location:      config.FileGet(name, "location"),
-		storageClass:  config.FileGet(name, "storage_class"),
-		pacer:         pacer.New().SetMinSleep(minSleep).SetPacer(pacer.GoogleDrivePacer),
+		name:   name,
+		bucket: bucket,
+		root:   directory,
+		opt:    *opt,
+		pacer:  pacer.New().SetMinSleep(minSleep).SetPacer(pacer.GoogleDrivePacer),
 	}
 	f.features = (&fs.Features{
 		ReadMimeType:  true,
 		WriteMimeType: true,
 		BucketBased:   true,
 	}).Fill(f)
-	if f.objectACL == "" {
-		f.objectACL = "private"
-	}
-	if f.bucketACL == "" {
-		f.bucketACL = "private"
-	}
-	if *gcsLocation != "" {
-		f.location = *gcsLocation
-	}
-	if *gcsStorageClass != "" {
-		f.storageClass = *gcsStorageClass
-	}
 
 	// Create a new authorized Drive client.
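// getServiceAccountClient (left unchanged above) turns the credentials JSON
// into an authorized *http.Client; the drive backend's version earlier in
// this patch shows the shape, roughly:
//
//	conf, err := google.JWTConfigFromJSON(credentialsData, storage.DevstorageFullControlScope)
//	if err != nil {
//		return nil, errors.Wrap(err, "error processing credentials")
//	}
//	ctx := oauthutil.Context(fshttp.NewClient(fs.Config))
//	return oauth2.NewClient(ctx, conf.TokenSource(ctx)), nil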
f.client = oAuthClient @@ -550,10 +557,10 @@ func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) { if dir != "" { return nil, fs.ErrorListBucketRequired } - if f.projectNumber == "" { + if f.opt.ProjectNumber == "" { return nil, errors.New("can't list buckets without project number") } - listBuckets := f.svc.Buckets.List(f.projectNumber).MaxResults(listChunks) + listBuckets := f.svc.Buckets.List(f.opt.ProjectNumber).MaxResults(listChunks) for { var buckets *storage.Buckets err = f.pacer.Call(func() (bool, error) { @@ -672,17 +679,17 @@ func (f *Fs) Mkdir(dir string) (err error) { return errors.Wrap(err, "failed to get bucket") } - if f.projectNumber == "" { + if f.opt.ProjectNumber == "" { return errors.New("can't make bucket without project number") } bucket := storage.Bucket{ Name: f.bucket, - Location: f.location, - StorageClass: f.storageClass, + Location: f.opt.Location, + StorageClass: f.opt.StorageClass, } err = f.pacer.Call(func() (bool, error) { - _, err = f.svc.Buckets.Insert(f.projectNumber, &bucket).PredefinedAcl(f.bucketACL).Do() + _, err = f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket).PredefinedAcl(f.opt.BucketACL).Do() return shouldRetry(err) }) if err == nil { @@ -948,7 +955,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio } var newObject *storage.Object err = o.fs.pacer.CallNoRetry(func() (bool, error) { - newObject, err = o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name).PredefinedAcl(o.fs.objectACL).Do() + newObject, err = o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name).PredefinedAcl(o.fs.opt.ObjectACL).Do() return shouldRetry(err) }) if err != nil { diff --git a/backend/http/http.go b/backend/http/http.go index 05437b8a8..cb28f25b9 100644 --- a/backend/http/http.go +++ b/backend/http/http.go @@ -14,7 +14,8 @@ import ( "time" "github.com/ncw/rclone/fs" - "github.com/ncw/rclone/fs/config" + "github.com/ncw/rclone/fs/config/configmap" + "github.com/ncw/rclone/fs/config/configstruct" "github.com/ncw/rclone/fs/fshttp" "github.com/ncw/rclone/fs/hash" "github.com/ncw/rclone/lib/rest" @@ -35,7 +36,7 @@ func init() { Options: []fs.Option{{ Name: "url", Help: "URL of http host to connect to", - Optional: false, + Required: true, Examples: []fs.OptionExample{{ Value: "https://example.com", Help: "Connect to example.com", @@ -45,11 +46,17 @@ func init() { fs.Register(fsi) } +// Options defines the configuration for this backend +type Options struct { + Endpoint string `config:"url"` +} + // Fs stores the interface to the remote HTTP files type Fs struct { name string root string features *fs.Features // optional features + opt Options // options for this backend endpoint *url.URL endpointURL string // endpoint as a string httpClient *http.Client @@ -78,14 +85,20 @@ func statusError(res *http.Response, err error) error { // NewFs creates a new Fs object from the name and root. It connects to // the host specified in the config file. 
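// The http backend's NewFs below forces a trailing "/" on the endpoint
// before parsing it, because net/url only treats the last path segment as a
// directory when resolving relative references against it. A sketch of the
// underlying behaviour with an illustrative host:
//
//	base, _ := url.Parse("https://example.com/files/")
//	rel, _ := url.Parse("sub/dir/")
//	u := base.ResolveReference(rel)
//	// u.String() == "https://example.com/files/sub/dir/"
//	// without the trailing slash on base, "files" would be replaced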
-func NewFs(name, root string) (fs.Fs, error) { - endpoint := config.FileGet(name, "url") - if !strings.HasSuffix(endpoint, "/") { - endpoint += "/" +func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { + // Parse config into Options struct + opt := new(Options) + err := configstruct.Set(m, opt) + if err != nil { + return nil, err + } + + if !strings.HasSuffix(opt.Endpoint, "/") { + opt.Endpoint += "/" } // Parse the endpoint and stick the root onto it - base, err := url.Parse(endpoint) + base, err := url.Parse(opt.Endpoint) if err != nil { return nil, err } @@ -130,6 +143,7 @@ func NewFs(name, root string) (fs.Fs, error) { f := &Fs{ name: name, root: root, + opt: *opt, httpClient: client, endpoint: u, endpointURL: u.String(), diff --git a/backend/http/http_internal_test.go b/backend/http/http_internal_test.go index f9b1319b8..bb062ca78 100644 --- a/backend/http/http_internal_test.go +++ b/backend/http/http_internal_test.go @@ -16,6 +16,7 @@ import ( "github.com/ncw/rclone/fs" "github.com/ncw/rclone/fs/config" + "github.com/ncw/rclone/fs/config/configmap" "github.com/ncw/rclone/fstest" "github.com/ncw/rclone/lib/rest" "github.com/stretchr/testify/assert" @@ -29,7 +30,7 @@ var ( ) // prepareServer the test server and return a function to tidy it up afterwards -func prepareServer(t *testing.T) func() { +func prepareServer(t *testing.T) (configmap.Simple, func()) { // file server for test/files fileServer := http.FileServer(http.Dir(filesPath)) @@ -41,19 +42,24 @@ func prepareServer(t *testing.T) func() { // fs.Config.LogLevel = fs.LogLevelDebug // fs.Config.DumpHeaders = true // fs.Config.DumpBodies = true - config.FileSet(remoteName, "type", "http") - config.FileSet(remoteName, "url", ts.URL) + // config.FileSet(remoteName, "type", "http") + // config.FileSet(remoteName, "url", ts.URL) + + m := configmap.Simple{ + "type": "http", + "url": ts.URL, + } // return a function to tidy up - return ts.Close + return m, ts.Close } // prepare the test server and return a function to tidy it up afterwards func prepare(t *testing.T) (fs.Fs, func()) { - tidy := prepareServer(t) + m, tidy := prepareServer(t) // Instantiate it - f, err := NewFs(remoteName, "") + f, err := NewFs(remoteName, "", m) require.NoError(t, err) return f, tidy @@ -177,20 +183,20 @@ func TestMimeType(t *testing.T) { } func TestIsAFileRoot(t *testing.T) { - tidy := prepareServer(t) + m, tidy := prepareServer(t) defer tidy() - f, err := NewFs(remoteName, "one%.txt") + f, err := NewFs(remoteName, "one%.txt", m) assert.Equal(t, err, fs.ErrorIsFile) testListRoot(t, f) } func TestIsAFileSubDir(t *testing.T) { - tidy := prepareServer(t) + m, tidy := prepareServer(t) defer tidy() - f, err := NewFs(remoteName, "three/underthree.txt") + f, err := NewFs(remoteName, "three/underthree.txt", m) assert.Equal(t, err, fs.ErrorIsFile) entries, err := f.List("") diff --git a/backend/hubic/hubic.go b/backend/hubic/hubic.go index 43291d425..6419315fc 100644 --- a/backend/hubic/hubic.go +++ b/backend/hubic/hubic.go @@ -16,6 +16,8 @@ import ( "github.com/ncw/rclone/backend/swift" "github.com/ncw/rclone/fs" "github.com/ncw/rclone/fs/config" + "github.com/ncw/rclone/fs/config/configmap" + "github.com/ncw/rclone/fs/config/configstruct" "github.com/ncw/rclone/fs/config/obscure" "github.com/ncw/rclone/fs/fshttp" "github.com/ncw/rclone/lib/oauthutil" @@ -52,18 +54,18 @@ func init() { Name: "hubic", Description: "Hubic", NewFs: NewFs, - Config: func(name string) { - err := oauthutil.Config("hubic", name, oauthConfig) + Config: func(name string, m 
configmap.Mapper) { + err := oauthutil.Config("hubic", name, m, oauthConfig) if err != nil { log.Fatalf("Failed to configure token: %v", err) } }, Options: []fs.Option{{ Name: config.ConfigClientID, - Help: "Hubic Client Id - leave blank normally.", + Help: "Hubic Client Id\nLeave blank normally.", }, { Name: config.ConfigClientSecret, - Help: "Hubic Client Secret - leave blank normally.", + Help: "Hubic Client Secret\nLeave blank normally.", }}, }) } @@ -145,8 +147,8 @@ func (f *Fs) getCredentials() (err error) { } // NewFs constructs an Fs from the path, container:path -func NewFs(name, root string) (fs.Fs, error) { - client, _, err := oauthutil.NewClient(name, oauthConfig) +func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { + client, _, err := oauthutil.NewClient(name, m, oauthConfig) if err != nil { return nil, errors.Wrap(err, "failed to configure Hubic") } @@ -167,8 +169,15 @@ func NewFs(name, root string) (fs.Fs, error) { return nil, errors.Wrap(err, "error authenticating swift connection") } + // Parse config into swift.Options struct + opt := new(swift.Options) + err = configstruct.Set(m, opt) + if err != nil { + return nil, err + } + // Make inner swift Fs from the connection - swiftFs, err := swift.NewFsWithConnection(name, root, c, true) + swiftFs, err := swift.NewFsWithConnection(opt, name, root, c, true) if err != nil && err != fs.ErrorIsFile { return nil, err } diff --git a/backend/local/local.go b/backend/local/local.go index aaff49a1c..b7ac70c61 100644 --- a/backend/local/local.go +++ b/backend/local/local.go @@ -16,21 +16,14 @@ import ( "unicode/utf8" "github.com/ncw/rclone/fs" - "github.com/ncw/rclone/fs/config" - "github.com/ncw/rclone/fs/config/flags" + "github.com/ncw/rclone/fs/config/configmap" + "github.com/ncw/rclone/fs/config/configstruct" "github.com/ncw/rclone/fs/hash" "github.com/ncw/rclone/lib/readers" "github.com/pkg/errors" "google.golang.org/appengine/log" ) -var ( - followSymlinks = flags.BoolP("copy-links", "L", false, "Follow symlinks and copy the pointed to item.") - skipSymlinks = flags.BoolP("skip-links", "", false, "Don't warn about skipped symlinks.") - noUTFNorm = flags.BoolP("local-no-unicode-normalization", "", false, "Don't apply unicode normalization to paths and filenames") - noCheckUpdated = flags.BoolP("local-no-check-updated", "", false, "Don't check to see if the files change during upload") -) - // Constants const devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset @@ -41,29 +34,68 @@ func init() { Description: "Local Disk", NewFs: NewFs, Options: []fs.Option{{ - Name: "nounc", - Help: "Disable UNC (long path names) conversion on Windows", - Optional: true, + Name: "nounc", + Help: "Disable UNC (long path names) conversion on Windows", Examples: []fs.OptionExample{{ Value: "true", Help: "Disables long file names", }}, + }, { + Name: "copy_links", + Help: "Follow symlinks and copy the pointed to item.", + Default: false, + NoPrefix: true, + ShortOpt: "L", + Advanced: true, + }, { + Name: "skip_links", + Help: "Don't warn about skipped symlinks.", + Default: false, + NoPrefix: true, + Advanced: true, + }, { + Name: "no_unicode_normalization", + Help: "Don't apply unicode normalization to paths and filenames", + Default: false, + Advanced: true, + }, { + Name: "no_check_updated", + Help: "Don't check to see if the files change during upload", + Default: false, + Advanced: true, + }, { + Name: "one_file_system", + Help: "Don't cross filesystem boundaries (unix/macOS only).", + Default: false, + NoPrefix: true, + 
ShortOpt: "x", + Advanced: true, }}, } fs.Register(fsi) } +// Options defines the configuration for this backend +type Options struct { + FollowSymlinks bool `config:"copy_links"` + SkipSymlinks bool `config:"skip_links"` + NoUTFNorm bool `config:"no_unicode_normalization"` + NoCheckUpdated bool `config:"no_check_updated"` + NoUNC bool `config:"nounc"` + OneFileSystem bool `config:"one_file_system"` +} + // Fs represents a local filesystem rooted at root type Fs struct { name string // the name of the remote root string // The root directory (OS path) + opt Options // parsed config options features *fs.Features // optional features dev uint64 // device number of root node precisionOk sync.Once // Whether we need to read the precision precision time.Duration // precision of local filesystem wmu sync.Mutex // used for locking access to 'warned'. warned map[string]struct{} // whether we have warned about this string - nounc bool // Skip UNC conversion on Windows // do os.Lstat or os.Stat lstat func(name string) (os.FileInfo, error) dirNames *mapper // directory name mapping @@ -84,18 +116,22 @@ type Object struct { // ------------------------------------------------------------ // NewFs constructs an Fs from the path -func NewFs(name, root string) (fs.Fs, error) { - var err error +func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { + // Parse config into Options struct + opt := new(Options) + err := configstruct.Set(m, opt) + if err != nil { + return nil, err + } - if *noUTFNorm { + if opt.NoUTFNorm { log.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed") } - nounc := config.FileGet(name, "nounc") f := &Fs{ name: name, + opt: *opt, warned: make(map[string]struct{}), - nounc: nounc == "true", dev: devUnset, lstat: os.Lstat, dirNames: newMapper(), @@ -105,14 +141,14 @@ func NewFs(name, root string) (fs.Fs, error) { CaseInsensitive: f.caseInsensitive(), CanHaveEmptyDirectories: true, }).Fill(f) - if *followSymlinks { + if opt.FollowSymlinks { f.lstat = os.Stat } // Check to see if this points to a file fi, err := f.lstat(f.root) if err == nil { - f.dev = readDevice(fi) + f.dev = readDevice(fi, f.opt.OneFileSystem) } if err == nil && fi.Mode().IsRegular() { // It is a file, so use the parent as the root @@ -243,7 +279,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { newRemote := path.Join(remote, name) newPath := filepath.Join(fsDirPath, name) // Follow symlinks if required - if *followSymlinks && (mode&os.ModeSymlink) != 0 { + if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 { fi, err = os.Stat(newPath) if err != nil { return nil, err @@ -253,7 +289,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { if fi.IsDir() { // Ignore directories which are symlinks. These are junction points under windows which // are kind of a souped up symlink. Unix doesn't have directories which are symlinks. 
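// -L/--copy-links (now opt.FollowSymlinks) works by choosing the stat
// function once at Fs construction, so every later lookup transparently
// follows links; a sketch with a hypothetical path:
//
//	lstat := os.Lstat // default: report the symlink itself
//	if opt.FollowSymlinks {
//		lstat = os.Stat // follow the link and report its target
//	}
//	fi, err := lstat("/path/to/entry")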
- if (mode&os.ModeSymlink) == 0 && f.dev == readDevice(fi) { + if (mode&os.ModeSymlink) == 0 && f.dev == readDevice(fi, f.opt.OneFileSystem) { d := fs.NewDir(f.dirNames.Save(newRemote, f.cleanRemote(newRemote)), fi.ModTime()) entries = append(entries, d) } @@ -357,7 +393,7 @@ func (f *Fs) Mkdir(dir string) error { if err != nil { return err } - f.dev = readDevice(fi) + f.dev = readDevice(fi, f.opt.OneFileSystem) } return nil } @@ -643,7 +679,7 @@ func (o *Object) Storable() bool { } mode := o.mode if mode&os.ModeSymlink != 0 { - if !*skipSymlinks { + if !o.fs.opt.SkipSymlinks { fs.Logf(o, "Can't follow symlink without -L/--copy-links") } return false @@ -668,7 +704,7 @@ type localOpenFile struct { // Read bytes from the object - see io.Reader func (file *localOpenFile) Read(p []byte) (n int, err error) { - if !*noCheckUpdated { + if !file.o.fs.opt.NoCheckUpdated { // Check if file has the same size and modTime fi, err := file.fd.Stat() if err != nil { @@ -878,7 +914,7 @@ func (f *Fs) cleanPath(s string) string { s = s2 } } - if !f.nounc { + if !f.opt.NoUNC { // Convert to UNC s = uncPath(s) } diff --git a/backend/local/local_internal_test.go b/backend/local/local_internal_test.go index cb9250ae6..d69e7cf22 100644 --- a/backend/local/local_internal_test.go +++ b/backend/local/local_internal_test.go @@ -45,7 +45,7 @@ func TestUpdatingCheck(t *testing.T) { fi, err := fd.Stat() require.NoError(t, err) - o := &Object{size: fi.Size(), modTime: fi.ModTime()} + o := &Object{size: fi.Size(), modTime: fi.ModTime(), fs: &Fs{}} wrappedFd := readers.NewLimitedReadCloser(fd, -1) hash, err := hash.NewMultiHasherTypes(hash.Supported) require.NoError(t, err) @@ -65,11 +65,7 @@ func TestUpdatingCheck(t *testing.T) { require.Errorf(t, err, "can't copy - source file is being updated") // turn the checking off and try again - - *noCheckUpdated = true - defer func() { - *noCheckUpdated = false - }() + in.o.fs.opt.NoCheckUpdated = true r.WriteFile(filePath, "content updated", time.Now()) _, err = in.Read(buf) diff --git a/backend/local/read_device_other.go b/backend/local/read_device_other.go index 1429c7ddc..c3fc4f408 100644 --- a/backend/local/read_device_other.go +++ b/backend/local/read_device_other.go @@ -8,6 +8,6 @@ import "os" // readDevice turns a valid os.FileInfo into a device number, // returning devUnset if it fails. -func readDevice(fi os.FileInfo) uint64 { +func readDevice(fi os.FileInfo, oneFileSystem bool) uint64 { return devUnset } diff --git a/backend/local/read_device_unix.go b/backend/local/read_device_unix.go index a8c34e52d..1b2b0c539 100644 --- a/backend/local/read_device_unix.go +++ b/backend/local/read_device_unix.go @@ -9,17 +9,12 @@ import ( "syscall" "github.com/ncw/rclone/fs" - "github.com/ncw/rclone/fs/config/flags" -) - -var ( - oneFileSystem = flags.BoolP("one-file-system", "x", false, "Don't cross filesystem boundaries.") ) // readDevice turns a valid os.FileInfo into a device number, // returning devUnset if it fails. 
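// Passing oneFileSystem into readDevice explicitly (instead of reading a
// package-level flag) is what lets the test above flip the option per-Fs.
// On unix the function reduces to reading st_dev from the underlying stat,
// roughly:
//
//	if statT, ok := fi.Sys().(*syscall.Stat_t); ok {
//		return uint64(statT.Dev) // compare against f.dev to stay on one filesystem
//	}
//	return devUnset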
-func readDevice(fi os.FileInfo) uint64 { - if !*oneFileSystem { +func readDevice(fi os.FileInfo, oneFileSystem bool) uint64 { + if !oneFileSystem { return devUnset } statT, ok := fi.Sys().(*syscall.Stat_t) diff --git a/backend/mega/mega.go b/backend/mega/mega.go index 7eb5bf75c..0f0b7bb02 100644 --- a/backend/mega/mega.go +++ b/backend/mega/mega.go @@ -24,8 +24,8 @@ import ( "time" "github.com/ncw/rclone/fs" - "github.com/ncw/rclone/fs/config" - "github.com/ncw/rclone/fs/config/flags" + "github.com/ncw/rclone/fs/config/configmap" + "github.com/ncw/rclone/fs/config/configstruct" "github.com/ncw/rclone/fs/config/obscure" "github.com/ncw/rclone/fs/fshttp" "github.com/ncw/rclone/fs/hash" @@ -44,7 +44,6 @@ const ( ) var ( - megaDebug = flags.BoolP("mega-debug", "", false, "If set then output more debug from mega.") megaCacheMu sync.Mutex // mutex for the below megaCache = map[string]*mega.Mega{} // cache logged in Mega's by user ) @@ -58,20 +57,33 @@ func init() { Options: []fs.Option{{ Name: "user", Help: "User name", - Optional: true, + Required: true, }, { Name: "pass", Help: "Password.", - Optional: true, + Required: true, IsPassword: true, + }, { + Name: "debug", + Help: "If set then output more debug from mega.", + Default: false, + Advanced: true, }}, }) } +// Options defines the configuration for this backend +type Options struct { + User string `config:"user"` + Pass string `config:"pass"` + Debug bool `config:"debug"` +} + // Fs represents a remote mega type Fs struct { name string // name of this remote root string // the path we are working on + opt Options // parsed config options features *fs.Features // optional features srv *mega.Mega // the connection to the server pacer *pacer.Pacer // pacer for API calls @@ -145,12 +157,16 @@ func (f *Fs) readMetaDataForPath(remote string) (info *mega.Node, err error) { } // NewFs constructs an Fs from the path, container:path -func NewFs(name, root string) (fs.Fs, error) { - user := config.FileGet(name, "user") - pass := config.FileGet(name, "pass") - if pass != "" { +func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { + // Parse config into Options struct + opt := new(Options) + err := configstruct.Set(m, opt) + if err != nil { + return nil, err + } + if opt.Pass != "" { var err error - pass, err = obscure.Reveal(pass) + opt.Pass, err = obscure.Reveal(opt.Pass) if err != nil { return nil, errors.Wrap(err, "couldn't decrypt password") } @@ -163,30 +179,31 @@ func NewFs(name, root string) (fs.Fs, error) { // them up between different remotes. megaCacheMu.Lock() defer megaCacheMu.Unlock() - srv := megaCache[user] + srv := megaCache[opt.User] if srv == nil { srv = mega.New().SetClient(fshttp.NewClient(fs.Config)) srv.SetRetries(fs.Config.LowLevelRetries) // let mega do the low level retries srv.SetLogger(func(format string, v ...interface{}) { fs.Infof("*go-mega*", format, v...) }) - if *megaDebug { + if opt.Debug { srv.SetDebugger(func(format string, v ...interface{}) { fs.Debugf("*go-mega*", format, v...) 
}) } - err := srv.Login(user, pass) + err := srv.Login(opt.User, opt.Pass) if err != nil { return nil, errors.Wrap(err, "couldn't login") } - megaCache[user] = srv + megaCache[opt.User] = srv } root = parsePath(root) f := &Fs{ name: name, root: root, + opt: *opt, srv: srv, pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant), } @@ -196,7 +213,7 @@ func NewFs(name, root string) (fs.Fs, error) { }).Fill(f) // Find the root node and check if it is a file or not - _, err := f.findRoot(false) + _, err = f.findRoot(false) switch err { case nil: // root node found and is a directory diff --git a/backend/onedrive/onedrive.go b/backend/onedrive/onedrive.go index 4450c205b..bb3e0d5d1 100644 --- a/backend/onedrive/onedrive.go +++ b/backend/onedrive/onedrive.go @@ -18,7 +18,8 @@ import ( "github.com/ncw/rclone/backend/onedrive/api" "github.com/ncw/rclone/fs" "github.com/ncw/rclone/fs/config" - "github.com/ncw/rclone/fs/config/flags" + "github.com/ncw/rclone/fs/config/configmap" + "github.com/ncw/rclone/fs/config/configstruct" "github.com/ncw/rclone/fs/config/obscure" "github.com/ncw/rclone/fs/fserrors" "github.com/ncw/rclone/fs/hash" @@ -73,9 +74,7 @@ var ( RedirectURL: oauthutil.RedirectLocalhostURL, } oauthBusinessResource = oauth2.SetAuthURLParam("resource", discoveryServiceURL) - - chunkSize = fs.SizeSuffix(10 * 1024 * 1024) - sharedURL = "https://api.onedrive.com/v1.0/drives" // root URL for remote shared resources + sharedURL = "https://api.onedrive.com/v1.0/drives" // root URL for remote shared resources ) // Register with Fs @@ -84,7 +83,7 @@ func init() { Name: "onedrive", Description: "Microsoft OneDrive", NewFs: NewFs, - Config: func(name string) { + Config: func(name string, m configmap.Mapper) { // choose account type fmt.Printf("Choose OneDrive account type?\n") fmt.Printf(" * Say b for a OneDrive business account\n") @@ -93,12 +92,12 @@ func init() { if isPersonal { // for personal accounts we don't safe a field about the account - err := oauthutil.Config("onedrive", name, oauthPersonalConfig) + err := oauthutil.Config("onedrive", name, m, oauthPersonalConfig) if err != nil { log.Fatalf("Failed to configure token: %v", err) } } else { - err := oauthutil.ConfigErrorCheck("onedrive", name, func(req *http.Request) oauthutil.AuthError { + err := oauthutil.ConfigErrorCheck("onedrive", name, m, func(req *http.Request) oauthutil.AuthError { var resp oauthutil.AuthError resp.Name = req.URL.Query().Get("error") @@ -113,7 +112,7 @@ func init() { } // Are we running headless? 
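// Config callbacks now receive the configmap.Mapper as well: Get returns
// (value, found) and Set persists a value back to the remote's config,
// replacing config.FileGet/config.FileSet. A sketch of the headless check
// and write-back used below (the resource URL is illustrative):
//
//	if automatic, _ := m.Get(config.ConfigAutomatic); automatic != "" {
//		return // headless: skip the interactive resource discovery
//	}
//	m.Set(configResourceURL, "https://contoso-my.sharepoint.com/")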
- if config.FileGet(name, config.ConfigAutomatic) != "" { + if automatic, _ := m.Get(config.ConfigAutomatic); automatic != "" { // Yes, okay we are done return } @@ -127,7 +126,7 @@ func init() { Services []serviceResource `json:"value"` } - oAuthClient, _, err := oauthutil.NewClient(name, oauthBusinessConfig) + oAuthClient, _, err := oauthutil.NewClient(name, m, oauthBusinessConfig) if err != nil { log.Fatalf("Failed to configure OneDrive: %v", err) return @@ -172,13 +171,13 @@ func init() { foundService = config.Choose("Choose resource URL", resourcesID, resourcesURL, false) } - config.FileSet(name, configResourceURL, foundService) + m.Set(configResourceURL, foundService) oauthBusinessResource = oauth2.SetAuthURLParam("resource", foundService) // get the token from the inital config // we need to update the token with a resource // specific token we will query now - token, err := oauthutil.GetToken(name) + token, err := oauthutil.GetToken(name, m) if err != nil { fs.Errorf(nil, "Error while getting token: %s", err) return @@ -221,7 +220,7 @@ func init() { token.RefreshToken = jsonToken.RefreshToken // finally save them in the config - err = oauthutil.PutToken(name, token, true) + err = oauthutil.PutToken(name, m, token, true) if err != nil { fs.Errorf(nil, "Error while setting token: %s", err) } @@ -229,20 +228,30 @@ func init() { }, Options: []fs.Option{{ Name: config.ConfigClientID, - Help: "Microsoft App Client Id - leave blank normally.", + Help: "Microsoft App Client Id\nLeave blank normally.", }, { Name: config.ConfigClientSecret, - Help: "Microsoft App Client Secret - leave blank normally.", + Help: "Microsoft App Client Secret\nLeave blank normally.", + }, { + Name: "chunk_size", + Help: "Chunk size to upload files with - must be multiple of 320k.", + Default: fs.SizeSuffix(10 * 1024 * 1024), + Advanced: true, }}, }) +} - flags.VarP(&chunkSize, "onedrive-chunk-size", "", "Above this size files will be chunked - must be multiple of 320k.") +// Options defines the configuration for this backend +type Options struct { + ChunkSize fs.SizeSuffix `config:"chunk_size"` + ResourceURL string `config:"resource_url"` } // Fs represents a remote one drive type Fs struct { name string // name of this remote root string // the path we are working on + opt Options // parsed options features *fs.Features // optional features srv *rest.Client // the connection to the one drive server dirCache *dircache.DirCache // Map of directory path to directory id @@ -345,27 +354,35 @@ func errorHandler(resp *http.Response) error { } // NewFs constructs an Fs from the path, container:path -func NewFs(name, root string) (fs.Fs, error) { - // get the resource URL from the config file0 - resourceURL := config.FileGet(name, configResourceURL, "") +func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { + // Parse config into Options struct + opt := new(Options) + err := configstruct.Set(m, opt) + if err != nil { + return nil, err + } + if opt.ChunkSize%(320*1024) != 0 { + return nil, errors.Errorf("chunk size %d is not a multiple of 320k", opt.ChunkSize) + } // if we have a resource URL it's a business account otherwise a personal one + isBusiness := opt.ResourceURL != "" var rootURL string var oauthConfig *oauth2.Config - if resourceURL == "" { + if !isBusiness { // personal account setup oauthConfig = oauthPersonalConfig rootURL = rootURLPersonal } else { // business account setup oauthConfig = oauthBusinessConfig - rootURL = resourceURL + "_api/v2.0/drives/me" - sharedURL = resourceURL + 
"_api/v2.0/drives" + rootURL = opt.ResourceURL + "_api/v2.0/drives/me" + sharedURL = opt.ResourceURL + "_api/v2.0/drives" // update the URL in the AuthOptions - oauthBusinessResource = oauth2.SetAuthURLParam("resource", resourceURL) + oauthBusinessResource = oauth2.SetAuthURLParam("resource", opt.ResourceURL) } root = parsePath(root) - oAuthClient, ts, err := oauthutil.NewClient(name, oauthConfig) + oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig) if err != nil { log.Fatalf("Failed to configure OneDrive: %v", err) } @@ -373,9 +390,10 @@ func NewFs(name, root string) (fs.Fs, error) { f := &Fs{ name: name, root: root, + opt: *opt, srv: rest.NewClient(oAuthClient).SetRoot(rootURL), pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant), - isBusiness: resourceURL != "", + isBusiness: isBusiness, } f.features = (&fs.Features{ CaseInsensitive: true, @@ -1225,10 +1243,6 @@ func (o *Object) cancelUploadSession(url string) (err error) { // uploadMultipart uploads a file using multipart upload func (o *Object) uploadMultipart(in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) { - if chunkSize%(320*1024) != 0 { - return nil, errors.Errorf("chunk size %d is not a multiple of 320k", chunkSize) - } - // Create upload session fs.Debugf(o, "Starting multipart upload") session, err := o.createUploadSession(modTime) @@ -1252,7 +1266,7 @@ func (o *Object) uploadMultipart(in io.Reader, size int64, modTime time.Time) (i remaining := size position := int64(0) for remaining > 0 { - n := int64(chunkSize) + n := int64(o.fs.opt.ChunkSize) if remaining < n { n = remaining } diff --git a/backend/opendrive/opendrive.go b/backend/opendrive/opendrive.go index 81b9ac7ad..1d2e8203f 100644 --- a/backend/opendrive/opendrive.go +++ b/backend/opendrive/opendrive.go @@ -12,7 +12,8 @@ import ( "time" "github.com/ncw/rclone/fs" - "github.com/ncw/rclone/fs/config" + "github.com/ncw/rclone/fs/config/configmap" + "github.com/ncw/rclone/fs/config/configstruct" "github.com/ncw/rclone/fs/config/obscure" "github.com/ncw/rclone/fs/fserrors" "github.com/ncw/rclone/fs/fshttp" @@ -37,23 +38,30 @@ func init() { Description: "OpenDrive", NewFs: NewFs, Options: []fs.Option{{ - Name: "username", - Help: "Username", + Name: "username", + Help: "Username", + Required: true, }, { Name: "password", Help: "Password.", IsPassword: true, + Required: true, }}, }) } +// Options defines the configuration for this backend +type Options struct { + UserName string `config:"username"` + Password string `config:"password"` +} + // Fs represents a remote server type Fs struct { name string // name of this remote root string // the path we are working on + opt Options // parsed options features *fs.Features // optional features - username string // account name - password string // auth key0 srv *rest.Client // the connection to the server pacer *pacer.Pacer // To pace and retry the API calls session UserSessionInfo // contains the session data @@ -110,27 +118,31 @@ func (f *Fs) DirCacheFlush() { } // NewFs contstructs an Fs from the path, bucket:path -func NewFs(name, root string) (fs.Fs, error) { +func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { + // Parse config into Options struct + opt := new(Options) + err := configstruct.Set(m, opt) + if err != nil { + return nil, err + } root = parsePath(root) - username := config.FileGet(name, "username") - if username == "" { + if opt.UserName == "" { return nil, errors.New("username not found") } - password, err := 
obscure.Reveal(config.FileGet(name, "password"))
+	opt.Password, err = obscure.Reveal(opt.Password)
 	if err != nil {
-		return nil, errors.New("password coudl not revealed")
+		return nil, errors.New("password could not be revealed")
 	}
-	if password == "" {
+	if opt.Password == "" {
 		return nil, errors.New("password not found")
 	}
 
 	f := &Fs{
-		name:     name,
-		username: username,
-		password: password,
-		root:     root,
-		srv:      rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
-		pacer:    pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
+		name:  name,
+		root:  root,
+		opt:   *opt,
+		srv:   rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
+		pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
 	}
 
 	f.dirCache = dircache.New(root, "0", f)
@@ -141,7 +153,7 @@ func NewFs(name, root string) (fs.Fs, error) {
 	// get sessionID
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
-		account := Account{Username: username, Password: password}
+		account := Account{Username: opt.UserName, Password: opt.Password}
 
 		opts := rest.Opts{
 			Method: "POST",
diff --git a/backend/pcloud/pcloud.go b/backend/pcloud/pcloud.go
index 741109a3c..8654e9ffa 100644
--- a/backend/pcloud/pcloud.go
+++ b/backend/pcloud/pcloud.go
@@ -23,6 +23,8 @@ import (
 	"github.com/ncw/rclone/backend/pcloud/api"
 	"github.com/ncw/rclone/fs"
 	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/config/configstruct"
 	"github.com/ncw/rclone/fs/config/obscure"
 	"github.com/ncw/rclone/fs/fserrors"
 	"github.com/ncw/rclone/fs/hash"
@@ -65,26 +67,31 @@ func init() {
 		Name:        "pcloud",
 		Description: "Pcloud",
 		NewFs:       NewFs,
-		Config: func(name string) {
-			err := oauthutil.Config("pcloud", name, oauthConfig)
+		Config: func(name string, m configmap.Mapper) {
+			err := oauthutil.Config("pcloud", name, m, oauthConfig)
 			if err != nil {
 				log.Fatalf("Failed to configure token: %v", err)
 			}
 		},
 		Options: []fs.Option{{
 			Name: config.ConfigClientID,
-			Help: "Pcloud App Client Id - leave blank normally.",
+			Help: "Pcloud App Client Id\nLeave blank normally.",
 		}, {
 			Name: config.ConfigClientSecret,
-			Help: "Pcloud App Client Secret - leave blank normally.",
+			Help: "Pcloud App Client Secret\nLeave blank normally.",
 		}},
 	})
 }
 
+// Options defines the configuration for this backend
+type Options struct {
+}
+
 // Fs represents a remote pcloud
 type Fs struct {
 	name     string             // name of this remote
 	root     string             // the path we are working on
+	opt      Options            // parsed options
 	features *fs.Features       // optional features
 	srv      *rest.Client       // the connection to the server
 	dirCache *dircache.DirCache // Map of directory path to directory id
@@ -229,9 +236,15 @@ func errorHandler(resp *http.Response) error {
 }
 
 // NewFs constructs an Fs from the path, container:path
-func NewFs(name, root string) (fs.Fs, error) {
+func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+	// Parse config into Options struct
+	opt := new(Options)
+	err := configstruct.Set(m, opt)
+	if err != nil {
+		return nil, err
+	}
 	root = parsePath(root)
-	oAuthClient, ts, err := oauthutil.NewClient(name, oauthConfig)
+	oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
 	if err != nil {
 		log.Fatalf("Failed to configure Pcloud: %v", err)
 	}
@@ -239,6 +252,7 @@ func NewFs(name, root string) (fs.Fs, error) {
 	f := &Fs{
 		name:  name,
 		root:  root,
+		opt:   *opt,
 		srv:   rest.NewClient(oAuthClient).SetRoot(rootURL),
 		pacer:
 		pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
 	}
diff --git a/backend/qingstor/qingstor.go b/backend/qingstor/qingstor.go
index e8c7cb159..b5ca24e04 100644
--- a/backend/qingstor/qingstor.go
+++ b/backend/qingstor/qingstor.go
@@ -17,7 +17,8 @@ import (
 	"time"

 	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/config/configstruct"
 	"github.com/ncw/rclone/fs/fshttp"
 	"github.com/ncw/rclone/fs/hash"
 	"github.com/ncw/rclone/fs/walk"
@@ -34,49 +35,43 @@ func init() {
 		Description: "QingCloud Object Storage",
 		NewFs:       NewFs,
 		Options: []fs.Option{{
-			Name: "env_auth",
-			Help: "Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.",
-			Examples: []fs.OptionExample{
-				{
-					Value: "false",
-					Help:  "Enter QingStor credentials in the next step",
-				}, {
-					Value: "true",
-					Help:  "Get QingStor credentials from the environment (env vars or IAM)",
-				},
-			},
+			Name:    "env_auth",
+			Help:    "Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.",
+			Default: false,
+			Examples: []fs.OptionExample{{
+				Value: "false",
+				Help:  "Enter QingStor credentials in the next step",
+			}, {
+				Value: "true",
+				Help:  "Get QingStor credentials from the environment (env vars or IAM)",
+			}},
 		}, {
 			Name: "access_key_id",
-			Help: "QingStor Access Key ID - leave blank for anonymous access or runtime credentials.",
+			Help: "QingStor Access Key ID\nLeave blank for anonymous access or runtime credentials.",
 		}, {
 			Name: "secret_access_key",
-			Help: "QingStor Secret Access Key (password) - leave blank for anonymous access or runtime credentials.",
+			Help: "QingStor Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials.",
 		}, {
 			Name: "endpoint",
 			Help: "Enter a endpoint URL to connection QingStor API.\nLeave blank will use the default value \"https://qingstor.com:443\"",
 		}, {
 			Name: "zone",
-			Help: "Choose or Enter a zone to connect. Default is \"pek3a\".",
Default is \"pek3a\".", - Examples: []fs.OptionExample{ - { - Value: "pek3a", - - Help: "The Beijing (China) Three Zone\nNeeds location constraint pek3a.", - }, - { - Value: "sh1a", - - Help: "The Shanghai (China) First Zone\nNeeds location constraint sh1a.", - }, - { - Value: "gd2a", - - Help: "The Guangdong (China) Second Zone\nNeeds location constraint gd2a.", - }, - }, + Help: "Zone to connect to.\nDefault is \"pek3a\".", + Examples: []fs.OptionExample{{ + Value: "pek3a", + Help: "The Beijing (China) Three Zone\nNeeds location constraint pek3a.", + }, { + Value: "sh1a", + Help: "The Shanghai (China) First Zone\nNeeds location constraint sh1a.", + }, { + Value: "gd2a", + Help: "The Guangdong (China) Second Zone\nNeeds location constraint gd2a.", + }}, }, { - Name: "connection_retries", - Help: "Number of connnection retry.\nLeave blank will use the default value \"3\".", + Name: "connection_retries", + Help: "Number of connnection retries.", + Default: 3, + Advanced: true, }}, }) } @@ -95,17 +90,28 @@ func timestampToTime(tp int64) time.Time { return tm.UTC() } +// Options defines the configuration for this backend +type Options struct { + EnvAuth bool `config:"env_auth"` + AccessKeyID string `config:"access_key_id"` + SecretAccessKey string `config:"secret_access_key"` + Endpoint string `config:"endpoint"` + Zone string `config:"zone"` + ConnectionRetries int `config:"connection_retries"` +} + // Fs represents a remote qingstor server type Fs struct { name string // The name of the remote + root string // The root is a subdir, is a special object + opt Options // parsed options + features *fs.Features // optional features + svc *qs.Service // The connection to the qingstor server zone string // The zone we are working on bucket string // The bucket we are working on bucketOKMu sync.Mutex // mutex to protect bucketOK and bucketDeleted bucketOK bool // true if we have created the bucket bucketDeleted bool // true if we have deleted the bucket - root string // The root is a subdir, is a special object - features *fs.Features // optional features - svc *qs.Service // The connection to the qingstor server } // Object describes a qingstor object @@ -165,12 +171,12 @@ func qsParseEndpoint(endpoint string) (protocol, host, port string, err error) { } // qsConnection makes a connection to qingstor -func qsServiceConnection(name string) (*qs.Service, error) { - accessKeyID := config.FileGet(name, "access_key_id") - secretAccessKey := config.FileGet(name, "secret_access_key") +func qsServiceConnection(opt *Options) (*qs.Service, error) { + accessKeyID := opt.AccessKeyID + secretAccessKey := opt.SecretAccessKey switch { - case config.FileGetBool(name, "env_auth", false): + case opt.EnvAuth: // No need for empty checks if "env_auth" is true case accessKeyID == "" && secretAccessKey == "": // if no access key/secret and iam is explicitly disabled then fall back to anon interaction @@ -184,7 +190,7 @@ func qsServiceConnection(name string) (*qs.Service, error) { host := "qingstor.com" port := 443 - endpoint := config.FileGet(name, "endpoint", "") + endpoint := opt.Endpoint if endpoint != "" { _protocol, _host, _port, err := qsParseEndpoint(endpoint) @@ -204,48 +210,49 @@ func qsServiceConnection(name string) (*qs.Service, error) { } - connectionRetries := 3 - retries := config.FileGet(name, "connection_retries", "") - if retries != "" { - connectionRetries, _ = strconv.Atoi(retries) - } - cf, err := qsConfig.NewDefault() + if err != nil { + return nil, err + } cf.AccessKeyID = accessKeyID 
 	cf.SecretAccessKey = secretAccessKey
 	cf.Protocol = protocol
 	cf.Host = host
 	cf.Port = port
-	cf.ConnectionRetries = connectionRetries
+	cf.ConnectionRetries = opt.ConnectionRetries
 	cf.Connection = fshttp.NewClient(fs.Config)

-	svc, _ := qs.Init(cf)
-
-	return svc, err
+	return qs.Init(cf)
 }

 // NewFs constructs an Fs from the path, bucket:path
-func NewFs(name, root string) (fs.Fs, error) {
+func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+	// Parse config into Options struct
+	opt := new(Options)
+	err := configstruct.Set(m, opt)
+	if err != nil {
+		return nil, err
+	}
 	bucket, key, err := qsParsePath(root)
 	if err != nil {
 		return nil, err
 	}
-	svc, err := qsServiceConnection(name)
+	svc, err := qsServiceConnection(opt)
 	if err != nil {
 		return nil, err
 	}
-	zone := config.FileGet(name, "zone")
-	if zone == "" {
-		zone = "pek3a"
+	if opt.Zone == "" {
+		opt.Zone = "pek3a"
 	}
 	f := &Fs{
 		name:   name,
-		zone:   zone,
 		root:   key,
-		bucket: bucket,
+		opt:    *opt,
 		svc:    svc,
+		zone:   opt.Zone,
+		bucket: bucket,
 	}
 	f.features = (&fs.Features{
 		ReadMimeType: true,
@@ -258,7 +265,7 @@ func NewFs(name, root string) (fs.Fs, error) {
 		f.root += "/"
 	}
 	//Check to see if the object exists
-	bucketInit, err := svc.Bucket(bucket, zone)
+	bucketInit, err := svc.Bucket(bucket, opt.Zone)
 	if err != nil {
 		return nil, err
 	}
diff --git a/backend/s3/s3.go b/backend/s3/s3.go
index da094bfad..5fcdb6b90 100644
--- a/backend/s3/s3.go
+++ b/backend/s3/s3.go
@@ -37,8 +37,8 @@ import (
 	"github.com/aws/aws-sdk-go/service/s3"
 	"github.com/aws/aws-sdk-go/service/s3/s3manager"
 	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/config"
-	"github.com/ncw/rclone/fs/config/flags"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/config/configstruct"
 	"github.com/ncw/rclone/fs/fshttp"
 	"github.com/ncw/rclone/fs/hash"
 	"github.com/ncw/rclone/fs/walk"
@@ -82,8 +82,9 @@ func init() {
 				Help:  "Any other S3 compatible provider",
 			}},
 		}, {
-			Name: "env_auth",
-			Help: "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). Only applies if access_key_id and secret_access_key is blank.",
+			Name:    "env_auth",
+			Help:    "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).\nOnly applies if access_key_id and secret_access_key is blank.",
+			Default: false,
 			Examples: []fs.OptionExample{{
 				Value: "false",
 				Help:  "Enter AWS credentials in the next step",
@@ -93,10 +94,10 @@ func init() {
 			}},
 		}, {
 			Name: "access_key_id",
-			Help: "AWS Access Key ID - leave blank for anonymous access or runtime credentials.",
+			Help: "AWS Access Key ID.\nLeave blank for anonymous access or runtime credentials.",
 		}, {
 			Name: "secret_access_key",
-			Help: "AWS Secret Access Key (password) - leave blank for anonymous access or runtime credentials.",
+			Help: "AWS Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials.",
 		}, {
 			Name: "region",
 			Help: "Region to connect to.",
@@ -146,7 +147,7 @@ func init() {
 			}},
 		}, {
 			Name:     "region",
-			Help:     "Region to connect to. Leave blank if you are using an S3 clone and you don't have a region.",
+			Help:     "Region to connect to.\nLeave blank if you are using an S3 clone and you don't have a region.",
 			Provider: "!AWS",
 			Examples: []fs.OptionExample{{
 				Value: "",
@@ -293,7 +294,7 @@ func init() {
 			}},
 		}, {
 			Name:     "location_constraint",
-			Help:     "Location constraint - must be set to match the Region. Used when creating buckets only.",
Used when creating buckets only.", + Help: "Location constraint - must be set to match the Region.\nUsed when creating buckets only.", Provider: "AWS", Examples: []fs.OptionExample{{ Value: "", @@ -340,7 +341,7 @@ func init() { }}, }, { Name: "location_constraint", - Help: "Location constraint - must match endpoint when using IBM Cloud Public. For on-prem COS, do not make a selection from this list, hit enter", + Help: "Location constraint - must match endpoint when using IBM Cloud Public.\nFor on-prem COS, do not make a selection from this list, hit enter", Provider: "IBMCOS", Examples: []fs.OptionExample{{ Value: "us-standard", @@ -441,7 +442,7 @@ func init() { }}, }, { Name: "location_constraint", - Help: "Location constraint - must be set to match the Region. Leave blank if not sure. Used when creating buckets only.", + Help: "Location constraint - must be set to match the Region.\nLeave blank if not sure. Used when creating buckets only.", Provider: "!AWS,IBMCOS", }, { Name: "acl", @@ -518,10 +519,28 @@ func init() { Value: "ONEZONE_IA", Help: "One Zone Infrequent Access storage class", }}, - }, - }, + }, { + Name: "chunk_size", + Help: "Chunk size to use for uploading", + Default: fs.SizeSuffix(s3manager.MinUploadPartSize), + Advanced: true, + }, { + Name: "disable_checksum", + Help: "Don't store MD5 checksum with object metadata", + Default: false, + Advanced: true, + }, { + Name: "session_token", + Help: "An AWS session token", + Hide: fs.OptionHideBoth, + Advanced: true, + }, { + Name: "upload_concurrency", + Help: "Concurrency for multipart uploads.", + Default: 2, + Advanced: true, + }}, }) - flags.VarP(&s3ChunkSize, "s3-chunk-size", "", "Chunk size to use for uploading") } // Constants @@ -534,31 +553,36 @@ const ( maxFileSize = 5 * 1024 * 1024 * 1024 * 1024 // largest possible upload file size ) -// Globals -var ( - // Flags - s3ACL = flags.StringP("s3-acl", "", "", "Canned ACL used when creating buckets and/or storing objects in S3") - s3StorageClass = flags.StringP("s3-storage-class", "", "", "Storage class to use when uploading S3 objects (STANDARD|REDUCED_REDUNDANCY|STANDARD_IA|ONEZONE_IA)") - s3ChunkSize = fs.SizeSuffix(s3manager.MinUploadPartSize) - s3DisableChecksum = flags.BoolP("s3-disable-checksum", "", false, "Don't store MD5 checksum with object metadata") - s3UploadConcurrency = flags.IntP("s3-upload-concurrency", "", 2, "Concurrency for multipart uploads") -) +// Options defines the configuration for this backend +type Options struct { + Provider string `config:"provider"` + EnvAuth bool `config:"env_auth"` + AccessKeyID string `config:"access_key_id"` + SecretAccessKey string `config:"secret_access_key"` + Region string `config:"region"` + Endpoint string `config:"endpoint"` + LocationConstraint string `config:"location_constraint"` + ACL string `config:"acl"` + ServerSideEncryption string `config:"server_side_encryption"` + StorageClass string `config:"storage_class"` + ChunkSize fs.SizeSuffix `config:"chunk_size"` + DisableChecksum bool `config:"disable_checksum"` + SessionToken string `config:"session_token"` + UploadConcurrency int `config:"upload_concurrency"` +} // Fs represents a remote s3 server type Fs struct { - name string // the name of the remote - root string // root of the bucket - ignore all objects above this - features *fs.Features // optional features - c *s3.S3 // the connection to the s3 server - ses *session.Session // the s3 session - bucket string // the bucket we are working on - bucketOKMu sync.Mutex // mutex to protect bucket OK - 
-	bucketOK           bool             // true if we have created the bucket
-	bucketDeleted      bool             // true if we have deleted the bucket
-	acl                string           // ACL for new buckets / objects
-	locationConstraint string           // location constraint of new buckets
-	sse                string           // the type of server-side encryption
-	storageClass       string           // storage class
+	name          string           // the name of the remote
+	root          string           // root of the bucket - ignore all objects above this
+	opt           Options          // parsed options
+	features      *fs.Features     // optional features
+	c             *s3.S3           // the connection to the s3 server
+	ses           *session.Session // the s3 session
+	bucket        string           // the bucket we are working on
+	bucketOKMu    sync.Mutex       // mutex to protect bucket OK
+	bucketOK      bool             // true if we have created the bucket
+	bucketDeleted bool             // true if we have deleted the bucket
 }

 // Object describes a s3 object
@@ -620,12 +644,12 @@ func s3ParsePath(path string) (bucket, directory string, err error) {
 }

 // s3Connection makes a connection to s3
-func s3Connection(name string) (*s3.S3, *session.Session, error) {
+func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
 	// Make the auth
 	v := credentials.Value{
-		AccessKeyID:     config.FileGet(name, "access_key_id"),
-		SecretAccessKey: config.FileGet(name, "secret_access_key"),
-		SessionToken:    config.FileGet(name, "session_token"),
+		AccessKeyID:     opt.AccessKeyID,
+		SecretAccessKey: opt.SecretAccessKey,
+		SessionToken:    opt.SessionToken,
 	}

 	lowTimeoutClient := &http.Client{Timeout: 1 * time.Second} // low timeout to ec2 metadata service
@@ -660,7 +684,7 @@ func s3Connection(name string) (*s3.S3, *session.Session, error) {
 	cred := credentials.NewChainCredentials(providers)

 	switch {
-	case config.FileGetBool(name, "env_auth", false):
+	case opt.EnvAuth:
 		// No need for empty checks if "env_auth" is true
 	case v.AccessKeyID == "" && v.SecretAccessKey == "":
 		// if no access key/secret and iam is explicitly disabled then fall back to anon interaction
@@ -671,26 +695,24 @@ func s3Connection(name string) (*s3.S3, *session.Session, error) {
 		return nil, nil, errors.New("secret_access_key not found")
 	}

-	endpoint := config.FileGet(name, "endpoint")
-	region := config.FileGet(name, "region")
-	if region == "" && endpoint == "" {
-		endpoint = "https://s3.amazonaws.com/"
+	if opt.Region == "" && opt.Endpoint == "" {
+		opt.Endpoint = "https://s3.amazonaws.com/"
 	}
-	if region == "" {
-		region = "us-east-1"
+	if opt.Region == "" {
+		opt.Region = "us-east-1"
 	}
 	awsConfig := aws.NewConfig().
-		WithRegion(region).
+		WithRegion(opt.Region).
 		WithMaxRetries(maxRetries).
 		WithCredentials(cred).
-		WithEndpoint(endpoint).
+		WithEndpoint(opt.Endpoint).
 		WithHTTPClient(fshttp.NewClient(fs.Config)).
 		WithS3ForcePathStyle(true)
 	// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
 	ses := session.New()
 	c := s3.New(ses, awsConfig)
-	if region == "other-v2-signature" {
-		fs.Debugf(name, "Using v2 auth")
+	if opt.Region == "other-v2-signature" {
+		fs.Debugf(nil, "Using v2 auth")
 		signer := func(req *request.Request) {
 			// Ignore AnonymousCredentials object
 			if req.Config.Credentials == credentials.AnonymousCredentials {
@@ -706,40 +728,37 @@ func s3Connection(name string) (*s3.S3, *session.Session, error) {
 }

 // NewFs constructs an Fs from the path, bucket:path
-func NewFs(name, root string) (fs.Fs, error) {
+func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+	// Parse config into Options struct
+	opt := new(Options)
+	err := configstruct.Set(m, opt)
+	if err != nil {
+		return nil, err
+	}
+	if opt.ChunkSize < fs.SizeSuffix(s3manager.MinUploadPartSize) {
+		return nil, errors.Errorf("s3 chunk size (%v) must be >= %v", opt.ChunkSize, fs.SizeSuffix(s3manager.MinUploadPartSize))
+	}
 	bucket, directory, err := s3ParsePath(root)
 	if err != nil {
 		return nil, err
 	}
-	c, ses, err := s3Connection(name)
+	c, ses, err := s3Connection(opt)
 	if err != nil {
 		return nil, err
 	}
 	f := &Fs{
-		name:               name,
-		c:                  c,
-		bucket:             bucket,
-		ses:                ses,
-		acl:                config.FileGet(name, "acl"),
-		root:               directory,
-		locationConstraint: config.FileGet(name, "location_constraint"),
-		sse:                config.FileGet(name, "server_side_encryption"),
-		storageClass:       config.FileGet(name, "storage_class"),
+		name:   name,
+		root:   directory,
+		opt:    *opt,
+		c:      c,
+		bucket: bucket,
+		ses:    ses,
 	}
 	f.features = (&fs.Features{
 		ReadMimeType:  true,
 		WriteMimeType: true,
 		BucketBased:   true,
 	}).Fill(f)
-	if *s3ACL != "" {
-		f.acl = *s3ACL
-	}
-	if *s3StorageClass != "" {
-		f.storageClass = *s3StorageClass
-	}
-	if s3ChunkSize < fs.SizeSuffix(s3manager.MinUploadPartSize) {
-		return nil, errors.Errorf("s3 chunk size must be >= %v", fs.SizeSuffix(s3manager.MinUploadPartSize))
-	}
 	if f.root != "" {
 		f.root += "/"
 		// Check to see if the object exists
@@ -1064,11 +1083,11 @@ func (f *Fs) Mkdir(dir string) error {
 	}
 	req := s3.CreateBucketInput{
 		Bucket: &f.bucket,
-		ACL:    &f.acl,
+		ACL:    &f.opt.ACL,
 	}
-	if f.locationConstraint != "" {
+	if f.opt.LocationConstraint != "" {
 		req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
-			LocationConstraint: &f.locationConstraint,
+			LocationConstraint: &f.opt.LocationConstraint,
 		}
 	}
 	_, err := f.c.CreateBucket(&req)
@@ -1297,7 +1316,7 @@ func (o *Object) SetModTime(modTime time.Time) error {
 	directive := s3.MetadataDirectiveReplace // replace metadata with that passed in
 	req := s3.CopyObjectInput{
 		Bucket:            &o.fs.bucket,
-		ACL:               &o.fs.acl,
+		ACL:               &o.fs.opt.ACL,
 		Key:               &key,
 		ContentType:       &mimeType,
 		CopySource:        aws.String(pathEscape(sourceKey)),
@@ -1353,10 +1372,10 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 	size := src.Size()

 	uploader := s3manager.NewUploader(o.fs.ses, func(u *s3manager.Uploader) {
-		u.Concurrency = *s3UploadConcurrency
+		u.Concurrency = o.fs.opt.UploadConcurrency
 		u.LeavePartsOnError = false
 		u.S3 = o.fs.c
-		u.PartSize = int64(s3ChunkSize)
+		u.PartSize = int64(o.fs.opt.ChunkSize)

 		if size == -1 {
 			// Make parts as small as possible while still being able to upload to the
@@ -1376,7 +1395,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 		metaMtime: aws.String(swift.TimeToFloatString(modTime)),
 	}

-	if !*s3DisableChecksum && size > uploader.PartSize {
+	if !o.fs.opt.DisableChecksum && size > uploader.PartSize {
 		hash, err := src.Hash(hash.MD5)
 		if err == nil && matchMd5.MatchString(hash) {
@@ -1394,18 +1413,18 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 	key := o.fs.root + o.remote
 	req := s3manager.UploadInput{
 		Bucket:      &o.fs.bucket,
-		ACL:         &o.fs.acl,
+		ACL:         &o.fs.opt.ACL,
 		Key:         &key,
 		Body:        in,
 		ContentType: &mimeType,
 		Metadata:    metadata,
 		//ContentLength: &size,
 	}
-	if o.fs.sse != "" {
-		req.ServerSideEncryption = &o.fs.sse
+	if o.fs.opt.ServerSideEncryption != "" {
+		req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
 	}
-	if o.fs.storageClass != "" {
-		req.StorageClass = &o.fs.storageClass
+	if o.fs.opt.StorageClass != "" {
+		req.StorageClass = &o.fs.opt.StorageClass
 	}
 	_, err = uploader.Upload(&req)
 	if err != nil {
diff --git a/backend/sftp/sftp.go b/backend/sftp/sftp.go
index 23b915c9f..d27a43c93 100644
--- a/backend/sftp/sftp.go
+++ b/backend/sftp/sftp.go
@@ -20,7 +20,8 @@ import (

 	"github.com/ncw/rclone/fs"
 	"github.com/ncw/rclone/fs/config"
-	"github.com/ncw/rclone/fs/config/flags"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/config/configstruct"
 	"github.com/ncw/rclone/fs/config/obscure"
 	"github.com/ncw/rclone/fs/fshttp"
 	"github.com/ncw/rclone/fs/hash"
@@ -38,10 +39,6 @@ const (

 var (
 	currentUser = readCurrentUser()
-
-	// Flags
-	sftpAskPassword = flags.BoolP("sftp-ask-password", "", false, "Allow asking for SFTP password when needed.")
-	sshPathOverride = flags.StringP("ssh-path-override", "", "", "Override path used by SSH connection.")
 )

 func init() {
@@ -52,32 +49,28 @@ func init() {
 		Options: []fs.Option{{
 			Name:     "host",
 			Help:     "SSH host to connect to",
-			Optional: false,
+			Required: true,
 			Examples: []fs.OptionExample{{
 				Value: "example.com",
 				Help:  "Connect to example.com",
 			}},
 		}, {
-			Name:     "user",
-			Help:     "SSH username, leave blank for current username, " + currentUser,
-			Optional: true,
+			Name: "user",
+			Help: "SSH username, leave blank for current username, " + currentUser,
 		}, {
-			Name:     "port",
-			Help:     "SSH port, leave blank to use default (22)",
-			Optional: true,
+			Name: "port",
+			Help: "SSH port, leave blank to use default (22)",
 		}, {
 			Name:       "pass",
 			Help:       "SSH password, leave blank to use ssh-agent.",
-			Optional:   true,
 			IsPassword: true,
 		}, {
-			Name:     "key_file",
-			Help:     "Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.",
-			Optional: true,
+			Name: "key_file",
+			Help: "Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.",
 		}, {
-			Name:     "use_insecure_cipher",
-			Help:     "Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.",
-			Optional: true,
+			Name:    "use_insecure_cipher",
+			Help:    "Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.",
+			Default: false,
 			Examples: []fs.OptionExample{
 				{
 					Value: "false",
@@ -88,30 +81,56 @@ func init() {
 				},
 			},
 		}, {
-			Name: "disable_hashcheck",
-			Help: "Disable the execution of SSH commands to determine if remote file hashing is available. Leave blank or set to false to enable hashing (recommended), set to true to disable hashing.",
-			Optional: true,
+			Name:    "disable_hashcheck",
+			Default: false,
+			Help:    "Disable the execution of SSH commands to determine if remote file hashing is available.\nLeave blank or set to false to enable hashing (recommended), set to true to disable hashing.",
+		}, {
+			Name:     "ask_password",
+			Default:  false,
+			Help:     "Allow asking for SFTP password when needed.",
+			Advanced: true,
+		}, {
+			Name:     "path_override",
+			Default:  "",
+			Help:     "Override path used by SSH connection.",
+			Advanced: true,
+		}, {
+			Name:     "set_modtime",
+			Default:  true,
+			Help:     "Set the modified time on the remote if set.",
+			Advanced: true,
 		}},
 	}
 	fs.Register(fsi)
 }

+// Options defines the configuration for this backend
+type Options struct {
+	Host              string `config:"host"`
+	User              string `config:"user"`
+	Port              string `config:"port"`
+	Pass              string `config:"pass"`
+	KeyFile           string `config:"key_file"`
+	UseInsecureCipher bool   `config:"use_insecure_cipher"`
+	DisableHashCheck  bool   `config:"disable_hashcheck"`
+	AskPassword       bool   `config:"ask_password"`
+	PathOverride      string `config:"path_override"`
+	SetModTime        bool   `config:"set_modtime"`
+}
+
 // Fs stores the interface to the remote SFTP files
 type Fs struct {
-	name              string
-	root              string
-	features          *fs.Features // optional features
-	config            *ssh.ClientConfig
-	host              string
-	port              string
-	url               string
-	mkdirLock         *stringLock
-	cachedHashes      *hash.Set
-	hashcheckDisabled bool
-	setModtime        bool
-	poolMu            sync.Mutex
-	pool              []*conn
-	connLimit         *rate.Limiter // for limiting number of connections per second
+	name         string
+	root         string
+	opt          Options      // parsed options
+	features     *fs.Features // optional features
+	config       *ssh.ClientConfig
+	url          string
+	mkdirLock    *stringLock
+	cachedHashes *hash.Set
+	poolMu       sync.Mutex
+	pool         []*conn
+	connLimit    *rate.Limiter // for limiting number of connections per second
 }

 // Object is a remote SFTP file that has been stat'd (so it exists, but is not necessarily open for reading)
@@ -197,7 +216,7 @@ func (f *Fs) sftpConnection() (c *conn, err error) {
 	c = &conn{
 		err: make(chan error, 1),
 	}
-	c.sshClient, err = Dial("tcp", f.host+":"+f.port, f.config)
+	c.sshClient, err = Dial("tcp", f.opt.Host+":"+f.opt.Port, f.config)
 	if err != nil {
 		return nil, errors.Wrap(err, "couldn't connect SSH")
 	}
@@ -270,35 +289,33 @@ func (f *Fs) putSftpConnection(pc **conn, err error) {

 // NewFs creates a new Fs object from the name and root. It connects to
 // the host specified in the config file.
-func NewFs(name, root string) (fs.Fs, error) {
-	user := config.FileGet(name, "user")
-	host := config.FileGet(name, "host")
-	port := config.FileGet(name, "port")
-	pass := config.FileGet(name, "pass")
-	keyFile := config.FileGet(name, "key_file")
-	insecureCipher := config.FileGetBool(name, "use_insecure_cipher")
-	hashcheckDisabled := config.FileGetBool(name, "disable_hashcheck")
-	setModtime := config.FileGetBool(name, "set_modtime", true)
-	if user == "" {
-		user = currentUser
+func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+	// Parse config into Options struct
+	opt := new(Options)
+	err := configstruct.Set(m, opt)
+	if err != nil {
+		return nil, err
 	}
-	if port == "" {
-		port = "22"
+	if opt.User == "" {
+		opt.User = currentUser
+	}
+	if opt.Port == "" {
+		opt.Port = "22"
 	}
 	sshConfig := &ssh.ClientConfig{
-		User:            user,
+		User:            opt.User,
 		Auth:            []ssh.AuthMethod{},
 		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
 		Timeout:         fs.Config.ConnectTimeout,
 	}

-	if insecureCipher {
+	if opt.UseInsecureCipher {
 		sshConfig.Config.SetDefaults()
 		sshConfig.Config.Ciphers = append(sshConfig.Config.Ciphers, "aes128-cbc")
 	}

 	// Add ssh agent-auth if no password or file specified
-	if pass == "" && keyFile == "" {
+	if opt.Pass == "" && opt.KeyFile == "" {
 		sshAgentClient, _, err := sshagent.New()
 		if err != nil {
 			return nil, errors.Wrap(err, "couldn't connect to ssh-agent")
@@ -311,8 +328,8 @@ func NewFs(name, root string) (fs.Fs, error) {
 	}

 	// Load key file if specified
-	if keyFile != "" {
-		key, err := ioutil.ReadFile(keyFile)
+	if opt.KeyFile != "" {
+		key, err := ioutil.ReadFile(opt.KeyFile)
 		if err != nil {
 			return nil, errors.Wrap(err, "failed to read private key file")
 		}
@@ -324,8 +341,8 @@ func NewFs(name, root string) (fs.Fs, error) {
 	}

 	// Auth from password if specified
-	if pass != "" {
-		clearpass, err := obscure.Reveal(pass)
+	if opt.Pass != "" {
+		clearpass, err := obscure.Reveal(opt.Pass)
 		if err != nil {
 			return nil, err
 		}
@@ -333,23 +350,20 @@ func NewFs(name, root string) (fs.Fs, error) {
 	}

 	// Ask for password if none was defined and we're allowed to
-	if pass == "" && *sftpAskPassword {
+	if opt.Pass == "" && opt.AskPassword {
 		_, _ = fmt.Fprint(os.Stderr, "Enter SFTP password: ")
 		clearpass := config.ReadPassword()
 		sshConfig.Auth = append(sshConfig.Auth, ssh.Password(clearpass))
 	}

 	f := &Fs{
-		name:              name,
-		root:              root,
-		config:            sshConfig,
-		host:              host,
-		port:              port,
-		url:               "sftp://" + user + "@" + host + ":" + port + "/" + root,
-		hashcheckDisabled: hashcheckDisabled,
-		setModtime:        setModtime,
-		mkdirLock:         newStringLock(),
-		connLimit:         rate.NewLimiter(rate.Limit(connectionsPerSecond), 1),
+		name:      name,
+		root:      root,
+		opt:       *opt,
+		config:    sshConfig,
+		url:       "sftp://" + opt.User + "@" + opt.Host + ":" + opt.Port + "/" + root,
+		mkdirLock: newStringLock(),
+		connLimit: rate.NewLimiter(rate.Limit(connectionsPerSecond), 1),
 	}
 	f.features = (&fs.Features{
 		CanHaveEmptyDirectories: true,
@@ -663,7 +677,7 @@ func (f *Fs) Hashes() hash.Set {
 		return *f.cachedHashes
 	}

-	if f.hashcheckDisabled {
+	if f.opt.DisableHashCheck {
 		return hash.Set(hash.None)
 	}

@@ -758,8 +772,8 @@ func (o *Object) Hash(r hash.Type) (string, error) {
 	session.Stdout = &stdout
 	session.Stderr = &stderr
 	escapedPath := shellEscape(o.path())
-	if *sshPathOverride != "" {
-		escapedPath = shellEscape(path.Join(*sshPathOverride, o.remote))
+	if o.fs.opt.PathOverride != "" {
+		escapedPath = shellEscape(path.Join(o.fs.opt.PathOverride, o.remote))
 	}
 	err = session.Run(hashCmd + " " + escapedPath)
 	if err != nil {
@@ -852,7 +866,7 @@ func (o *Object) SetModTime(modTime time.Time) error {
 	if err != nil {
 		return errors.Wrap(err, "SetModTime")
 	}
-	if o.fs.setModtime {
+	if o.fs.opt.SetModTime {
 		err = c.sftpClient.Chtimes(o.path(), modTime, modTime)
 		o.fs.putSftpConnection(&c, err)
 		if err != nil {
diff --git a/backend/swift/swift.go b/backend/swift/swift.go
index bd0dd1250..c5b9eebe2 100644
--- a/backend/swift/swift.go
+++ b/backend/swift/swift.go
@@ -14,8 +14,8 @@ import (
 	"time"

 	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/config"
-	"github.com/ncw/rclone/fs/config/flags"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/config/configstruct"
 	"github.com/ncw/rclone/fs/fserrors"
 	"github.com/ncw/rclone/fs/fshttp"
 	"github.com/ncw/rclone/fs/hash"
@@ -31,11 +31,6 @@ const (
 	listChunks = 1000 // chunk size to read directory listings
 )

-// Globals
-var (
-	chunkSize = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
-)
-
 // Register with Fs
 func init() {
 	fs.Register(&fs.RegInfo{
@@ -43,8 +38,9 @@ func init() {
 		Description: "Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)",
 		NewFs:       NewFs,
 		Options: []fs.Option{{
-			Name: "env_auth",
-			Help: "Get swift credentials from environment variables in standard OpenStack form.",
+			Name:    "env_auth",
+			Help:    "Get swift credentials from environment variables in standard OpenStack form.",
+			Default: false,
 			Examples: []fs.OptionExample{
 				{
 					Value: "false",
@@ -107,11 +103,13 @@ func init() {
 			Name: "auth_token",
 			Help: "Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)",
 		}, {
-			Name: "auth_version",
-			Help: "AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)",
+			Name:    "auth_version",
+			Help:    "AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)",
+			Default: 0,
 		}, {
-			Name: "endpoint_type",
-			Help: "Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE)",
+			Name:    "endpoint_type",
+			Help:    "Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE)",
+			Default: "public",
 			Examples: []fs.OptionExample{{
 				Help:  "Public (default, choose this if not sure)",
 				Value: "public",
@@ -122,10 +120,32 @@ func init() {
 				Help:  "Admin",
 				Value: "admin",
 			}},
-		},
-		},
+		}, {
+			Name:     "chunk_size",
+			Help:     "Above this size files will be chunked into a _segments container.",
+			Default:  fs.SizeSuffix(5 * 1024 * 1024 * 1024),
+			Advanced: true,
+		}},
 	})
-	flags.VarP(&chunkSize, "swift-chunk-size", "", "Above this size files will be chunked into a _segments container.")
+}
+
+// Options defines the configuration for this backend
+type Options struct {
+	EnvAuth      bool          `config:"env_auth"`
+	User         string        `config:"user"`
+	Key          string        `config:"key"`
+	Auth         string        `config:"auth"`
+	UserID       string        `config:"user_id"`
+	Domain       string        `config:"domain"`
+	Tenant       string        `config:"tenant"`
+	TenantID     string        `config:"tenant_id"`
+	TenantDomain string        `config:"tenant_domain"`
+	Region       string        `config:"region"`
+	StorageURL   string        `config:"storage_url"`
+	AuthToken    string        `config:"auth_token"`
+	AuthVersion  int           `config:"auth_version"`
+	EndpointType string        `config:"endpoint_type"`
+	ChunkSize    fs.SizeSuffix `config:"chunk_size"`
 }

 // Fs represents a remote swift server
@@ -133,6 +153,7 @@ type Fs struct {
 	name              string            // name of this remote
 	root              string            // the path we are working on if any
 	features          *fs.Features      // optional features
+	opt               Options           // options for this backend
 	c                 *swift.Connection // the connection to the swift server
 	container         string            // the container we are working on
 	containerOKMu     sync.Mutex        // mutex to protect container OK
@@ -195,27 +216,27 @@ func parsePath(path string) (container, directory string, err error) {
 }

 // swiftConnection makes a connection to swift
-func swiftConnection(name string) (*swift.Connection, error) {
+func swiftConnection(opt *Options, name string) (*swift.Connection, error) {
 	c := &swift.Connection{
 		// Keep these in the same order as the Config for ease of checking
-		UserName:       config.FileGet(name, "user"),
-		ApiKey:         config.FileGet(name, "key"),
-		AuthUrl:        config.FileGet(name, "auth"),
-		UserId:         config.FileGet(name, "user_id"),
-		Domain:         config.FileGet(name, "domain"),
-		Tenant:         config.FileGet(name, "tenant"),
-		TenantId:       config.FileGet(name, "tenant_id"),
-		TenantDomain:   config.FileGet(name, "tenant_domain"),
-		Region:         config.FileGet(name, "region"),
-		StorageUrl:     config.FileGet(name, "storage_url"),
-		AuthToken:      config.FileGet(name, "auth_token"),
-		AuthVersion:    config.FileGetInt(name, "auth_version", 0),
-		EndpointType:   swift.EndpointType(config.FileGet(name, "endpoint_type", "public")),
+		UserName:       opt.User,
+		ApiKey:         opt.Key,
+		AuthUrl:        opt.Auth,
+		UserId:         opt.UserID,
+		Domain:         opt.Domain,
+		Tenant:         opt.Tenant,
+		TenantId:       opt.TenantID,
+		TenantDomain:   opt.TenantDomain,
+		Region:         opt.Region,
+		StorageUrl:     opt.StorageURL,
+		AuthToken:      opt.AuthToken,
+		AuthVersion:    opt.AuthVersion,
+		EndpointType:   swift.EndpointType(opt.EndpointType),
 		ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
 		Timeout:        10 * fs.Config.Timeout,        // Use the timeouts in the transport
 		Transport:      fshttp.NewTransport(fs.Config),
 	}
-	if config.FileGetBool(name, "env_auth", false) {
+	if opt.EnvAuth {
 		err := c.ApplyEnvironment()
 		if err != nil {
 			return nil, errors.Wrap(err, "failed to read environment variables")
@@ -251,13 +272,14 @@ func swiftConnection(name string) (*swift.Connection, error) {
 //
 // if noCheckContainer is set then the Fs won't check the container
 // exists before creating it.
-func NewFsWithConnection(name, root string, c *swift.Connection, noCheckContainer bool) (fs.Fs, error) {
+func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, noCheckContainer bool) (fs.Fs, error) {
 	container, directory, err := parsePath(root)
 	if err != nil {
 		return nil, err
 	}
 	f := &Fs{
 		name:              name,
+		opt:               *opt,
 		c:                 c,
 		container:         container,
 		segmentsContainer: container + "_segments",
@@ -288,12 +310,19 @@ func NewFsWithConnection(name, root string, c *swift.Connection, noCheckContaine
 }

 // NewFs contstructs an Fs from the path, container:path
-func NewFs(name, root string) (fs.Fs, error) {
-	c, err := swiftConnection(name)
+func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+	// Parse config into Options struct
+	opt := new(Options)
+	err := configstruct.Set(m, opt)
 	if err != nil {
 		return nil, err
 	}
-	return NewFsWithConnection(name, root, c, false)
+
+	c, err := swiftConnection(opt, name)
+	if err != nil {
+		return nil, err
+	}
+	return NewFsWithConnection(opt, name, root, c, false)
 }

 // Return an Object from a path
@@ -871,7 +900,7 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
 			fs.Debugf(o, "Uploading segments into %q seems done (%v)", o.fs.segmentsContainer, err)
 			break
 		}
-		n := int64(chunkSize)
+		n := int64(o.fs.opt.ChunkSize)
 		if size != -1 {
 			n = min(left, n)
 			headers["Content-Length"] = strconv.FormatInt(n, 10) // set Content-Length as we know it
@@ -921,7 +950,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 	contentType := fs.MimeType(src)
 	headers := m.ObjectHeaders()
 	uniquePrefix := ""
-	if size > int64(chunkSize) || size == -1 {
+	if size > int64(o.fs.opt.ChunkSize) || size == -1 {
 		uniquePrefix, err = o.updateChunks(in, headers, size, contentType)
 		if err != nil {
 			return err
diff --git a/backend/webdav/webdav.go b/backend/webdav/webdav.go
index 142345ff3..6872d7446 100644
--- a/backend/webdav/webdav.go
+++ b/backend/webdav/webdav.go
@@ -32,6 +32,8 @@ import (
 	"github.com/ncw/rclone/backend/webdav/odrvcookie"
 	"github.com/ncw/rclone/fs"
 	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/config/configstruct"
 	"github.com/ncw/rclone/fs/config/obscure"
 	"github.com/ncw/rclone/fs/fserrors"
 	"github.com/ncw/rclone/fs/fshttp"
@@ -56,15 +58,14 @@ func init() {
 		Options: []fs.Option{{
 			Name:     "url",
 			Help:     "URL of http host to connect to",
-			Optional: false,
+			Required: true,
 			Examples: []fs.OptionExample{{
 				Value: "https://example.com",
 				Help:  "Connect to example.com",
 			}},
 		}, {
-			Name:     "vendor",
-			Help:     "Name of the Webdav site/service/software you are using",
-			Optional: false,
+			Name: "vendor",
+			Help: "Name of the Webdav site/service/software you are using",
 			Examples: []fs.OptionExample{{
 				Value: "nextcloud",
 				Help:  "Nextcloud",
@@ -79,34 +80,37 @@ func init() {
 				Help:  "Other site/service or software",
 			}},
 		}, {
-			Name:     "user",
-			Help:     "User name",
-			Optional: true,
+			Name: "user",
+			Help: "User name",
 		}, {
 			Name:       "pass",
 			Help:       "Password.",
-			Optional:   true,
 			IsPassword: true,
 		}, {
-			Name:     "bearer_token",
-			Help:     "Bearer token instead of user/pass (eg a Macaroon)",
-			Optional: true,
+			Name: "bearer_token",
+			Help: "Bearer token instead of user/pass (eg a Macaroon)",
 		}},
 	})
 }

+// Options defines the configuration for this backend
+type Options struct {
+	URL    string `config:"url"`
+	Vendor string `config:"vendor"`
+	User   string `config:"user"`
+	Pass   string `config:"pass"`
+}
+
 // Fs represents a remote webdav
 type Fs struct {
 	name        string        // name of this remote
 	root        string        // the path we are working on
+	opt         Options       // parsed options
 	features    *fs.Features  // optional features
 	endpoint    *url.URL      // URL of the host
 	endpointURL string        // endpoint as a string
 	srv         *rest.Client  // the connection to the one drive server
 	pacer       *pacer.Pacer  // pacer for API calls
-	user        string        // username
-	pass        string        // password
-	vendor      string        // name of the vendor
 	precision   time.Duration // mod time precision
 	canStream   bool          // set if can stream
 	useOCMtime  bool          // set if can use X-OC-Mtime
@@ -264,10 +268,12 @@ func (o *Object) filePath() string {
 }

 // NewFs constructs an Fs from the path, container:path
-func NewFs(name, root string) (fs.Fs, error) {
-	endpoint := config.FileGet(name, "url")
-	if !strings.HasSuffix(endpoint, "/") {
-		endpoint += "/"
+func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+	// Parse config into Options struct
+	opt := new(Options)
+	err := configstruct.Set(m, opt)
+	if err != nil {
+		return nil, err
 	}
 	rootIsDir := strings.HasSuffix(root, "/")
 	root = strings.Trim(root, "/")
@@ -275,17 +281,23 @@ func NewFs(name, root string) (fs.Fs, error) {
 	user := config.FileGet(name, "user")
 	pass := config.FileGet(name, "pass")
 	bearerToken := config.FileGet(name, "bearer_token")
-	if pass != "" {
+	if !strings.HasSuffix(opt.URL, "/") {
+		opt.URL += "/"
+	}
+	if opt.Pass != "" {
 		var err error
-		pass, err = obscure.Reveal(pass)
+		opt.Pass, err = obscure.Reveal(opt.Pass)
 		if err != nil {
 			return nil, errors.Wrap(err, "couldn't decrypt password")
 		}
 	}
-	vendor := config.FileGet(name, "vendor")
+	if opt.Vendor == "" {
+		opt.Vendor = "other"
+	}
+	root = strings.Trim(root, "/")

 	// Parse the endpoint
-	u, err := url.Parse(endpoint)
+	u, err := url.Parse(opt.URL)
 	if err != nil {
 		return nil, err
 	}
@@ -293,24 +305,23 @@ func NewFs(name, root string) (fs.Fs, error) {
 	f := &Fs{
 		name:        name,
 		root:        root,
+		opt:         *opt,
 		endpoint:    u,
 		endpointURL: u.String(),
 		srv:         rest.NewClient(fshttp.NewClient(fs.Config)).SetRoot(u.String()),
 		pacer:       pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
-		user:        user,
-		pass:        pass,
 		precision:   fs.ModTimeNotSupported,
 	}
 	f.features = (&fs.Features{
 		CanHaveEmptyDirectories: true,
 	}).Fill(f)
 	if user != "" || pass != "" {
-		f.srv.SetUserPass(user, pass)
+		f.srv.SetUserPass(opt.User, opt.Pass)
 	} else if bearerToken != "" {
 		f.srv.SetHeader("Authorization", "BEARER "+bearerToken)
 	}
 	f.srv.SetErrorHandler(errorHandler)
-	err = f.setQuirks(vendor)
+	err = f.setQuirks(opt.Vendor)
 	if err != nil {
 		return nil, err
 	}
@@ -339,10 +350,6 @@ func NewFs(name, root string) (fs.Fs, error) {

 // setQuirks adjusts the Fs for the vendor passed in
 func (f *Fs) setQuirks(vendor string) error {
-	if vendor == "" {
-		vendor = "other"
-	}
-	f.vendor = vendor
 	switch vendor {
 	case "owncloud":
 		f.canStream = true
@@ -355,7 +362,7 @@ func (f *Fs) setQuirks(vendor string) error {
 		// To mount sharepoint, two Cookies are required
 		// They have to be set instead of BasicAuth
 		f.srv.RemoveHeader("Authorization") // We don't need this Header if using cookies
-		spCk := odrvcookie.New(f.user, f.pass, f.endpointURL)
+		spCk := odrvcookie.New(f.opt.User, f.opt.Pass, f.endpointURL)
 		spCookies, err := spCk.Cookies()
 		if err != nil {
 			return err
diff --git a/backend/yandex/yandex.go b/backend/yandex/yandex.go
index 11c2e9d46..aa6cad2d0 100644
--- a/backend/yandex/yandex.go
+++ b/backend/yandex/yandex.go
@@ -16,6 +16,8 @@ import (
 	yandex "github.com/ncw/rclone/backend/yandex/api"
 	"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config" + "github.com/ncw/rclone/fs/config/configmap" + "github.com/ncw/rclone/fs/config/configstruct" "github.com/ncw/rclone/fs/config/obscure" "github.com/ncw/rclone/fs/fshttp" "github.com/ncw/rclone/fs/hash" @@ -51,29 +53,35 @@ func init() { Name: "yandex", Description: "Yandex Disk", NewFs: NewFs, - Config: func(name string) { - err := oauthutil.Config("yandex", name, oauthConfig) + Config: func(name string, m configmap.Mapper) { + err := oauthutil.Config("yandex", name, m, oauthConfig) if err != nil { log.Fatalf("Failed to configure token: %v", err) } }, Options: []fs.Option{{ Name: config.ConfigClientID, - Help: "Yandex Client Id - leave blank normally.", + Help: "Yandex Client Id\nLeave blank normally.", }, { Name: config.ConfigClientSecret, - Help: "Yandex Client Secret - leave blank normally.", + Help: "Yandex Client Secret\nLeave blank normally.", }}, }) } +// Options defines the configuration for this backend +type Options struct { + Token string `config:"token"` +} + // Fs represents a remote yandex type Fs struct { name string - root string //root path + root string // root path + opt Options // parsed options features *fs.Features // optional features yd *yandex.Client // client for rest api - diskRoot string //root path with "disk:/" container name + diskRoot string // root path with "disk:/" container name } // Object describes a swift object @@ -109,11 +117,9 @@ func (f *Fs) Features() *fs.Features { } // read access token from ConfigFile string -func getAccessToken(name string) (*oauth2.Token, error) { - // Read the token from the config file - tokenConfig := config.FileGet(name, "token") +func getAccessToken(opt *Options) (*oauth2.Token, error) { //Get access token from config string - decoder := json.NewDecoder(strings.NewReader(tokenConfig)) + decoder := json.NewDecoder(strings.NewReader(opt.Token)) var result *oauth2.Token err := decoder.Decode(&result) if err != nil { @@ -123,9 +129,16 @@ func getAccessToken(name string) (*oauth2.Token, error) { } // NewFs constructs an Fs from the path, container:path -func NewFs(name, root string) (fs.Fs, error) { +func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { + // Parse config into Options struct + opt := new(Options) + err := configstruct.Set(m, opt) + if err != nil { + return nil, err + } + //read access token from config - token, err := getAccessToken(name) + token, err := getAccessToken(opt) if err != nil { return nil, err } @@ -135,6 +148,7 @@ func NewFs(name, root string) (fs.Fs, error) { f := &Fs{ name: name, + opt: *opt, yd: yandexDisk, } f.features = (&fs.Features{ diff --git a/cmd/cachestats/cachestats.go b/cmd/cachestats/cachestats.go index 9f9c17238..bba5414a9 100644 --- a/cmd/cachestats/cachestats.go +++ b/cmd/cachestats/cachestats.go @@ -8,8 +8,6 @@ import ( "github.com/ncw/rclone/backend/cache" "github.com/ncw/rclone/cmd" - "github.com/ncw/rclone/fs" - "github.com/ncw/rclone/fs/config" "github.com/pkg/errors" "github.com/spf13/cobra" ) @@ -27,17 +25,6 @@ Print cache stats for a remote in JSON format Run: func(command *cobra.Command, args []string) { cmd.CheckArgs(1, 1, command, args) - _, configName, _, err := fs.ParseRemote(args[0]) - if err != nil { - fs.Errorf("cachestats", "%s", err.Error()) - return - } - - if !config.FileGetBool(configName, "read_only", false) { - config.FileSet(configName, "read_only", "true") - defer config.FileDeleteKey(configName, "read_only") - } - fsrc := cmd.NewFsSrc(args) cmd.Run(false, false, command, func() error { var fsCache *cache.Fs 
diff --git a/cmd/cmd.go b/cmd/cmd.go
index 303d76e8b..903b44f1f 100644
--- a/cmd/cmd.go
+++ b/cmd/cmd.go
@@ -16,6 +16,7 @@ import (
 	"runtime"
 	"runtime/pprof"
 	"strconv"
+	"strings"
 	"time"

 	"github.com/pkg/errors"
@@ -151,12 +152,12 @@ func ShowVersion() {
 // It returns a string with the file name if points to a file
 // otherwise "".
 func NewFsFile(remote string) (fs.Fs, string) {
-	fsInfo, configName, fsPath, err := fs.ParseRemote(remote)
+	_, _, fsPath, err := fs.ParseRemote(remote)
 	if err != nil {
 		fs.CountError(err)
 		log.Fatalf("Failed to create file system for %q: %v", remote, err)
 	}
-	f, err := fsInfo.NewFs(configName, fsPath)
+	f, err := fs.NewFs(remote)
 	switch err {
 	case fs.ErrorIsFile:
 		return f, path.Base(fsPath)
@@ -496,3 +497,51 @@ func resolveExitCode(err error) {
 		os.Exit(exitCodeUsageError)
 	}
 }
+
+// AddBackendFlags creates flags for all the backend options
+func AddBackendFlags() {
+	for _, fsInfo := range fs.Registry {
+		done := map[string]struct{}{}
+		for i := range fsInfo.Options {
+			opt := &fsInfo.Options[i]
+			// Skip if done already (eg with Provider options)
+			if _, doneAlready := done[opt.Name]; doneAlready {
+				continue
+			}
+			done[opt.Name] = struct{}{}
+			// Make a flag from each option
+			name := strings.Replace(opt.Name, "_", "-", -1) // convert snake_case to kebab-case
+			if !opt.NoPrefix {
+				name = fsInfo.Prefix + "-" + name
+			}
+			found := pflag.CommandLine.Lookup(name) != nil
+			if !found {
+				// Take first line of help only
+				help := strings.TrimSpace(opt.Help)
+				if nl := strings.IndexRune(help, '\n'); nl >= 0 {
+					help = help[:nl]
+				}
+				help = strings.TrimSpace(help)
+				flag := pflag.CommandLine.VarPF(opt, name, string(opt.ShortOpt), help)
+				if _, isBool := opt.Value.(bool); isBool {
+					flag.NoOptDefVal = "true"
+				}
+				// Hide on the command line if requested
+				if opt.Hide&fs.OptionHideCommandLine != 0 {
+					flag.Hidden = true
+				}
+			} else {
+				fs.Errorf(nil, "Not adding duplicate flag --%s", name)
+			}
+			//flag.Hidden = true
+		}
+	}
+}
+
+// Main runs rclone interpreting flags and commands out of os.Args
+func Main() {
+	AddBackendFlags()
+	if err := Root.Execute(); err != nil {
+		log.Fatalf("Fatal error: %v", err)
+	}
+}
diff --git a/cmd/cryptdecode/cryptdecode.go b/cmd/cryptdecode/cryptdecode.go
index 00585112e..b2a02b1ed 100644
--- a/cmd/cryptdecode/cryptdecode.go
+++ b/cmd/cryptdecode/cryptdecode.go
@@ -40,14 +40,14 @@ use it like this
 	Run: func(command *cobra.Command, args []string) {
 		cmd.CheckArgs(2, 11, command, args)
 		cmd.Run(false, false, command, func() error {
-			fsInfo, configName, _, err := fs.ParseRemote(args[0])
+			fsInfo, _, _, config, err := fs.ConfigFs(args[0])
 			if err != nil {
 				return err
 			}
 			if fsInfo.Name != "crypt" {
 				return errors.New("The remote needs to be of type \"crypt\"")
 			}
-			cipher, err := crypt.NewCipher(configName)
+			cipher, err := crypt.NewCipher(config)
 			if err != nil {
 				return err
 			}
diff --git a/cmd/lsjson/lsjson.go b/cmd/lsjson/lsjson.go
index 925151eda..18b2ce3f3 100644
--- a/cmd/lsjson/lsjson.go
+++ b/cmd/lsjson/lsjson.go
@@ -102,14 +102,14 @@ can be processed line by line as each item is written one to a line.
 		fsrc := cmd.NewFsSrc(args)
 		var cipher crypt.Cipher
 		if showEncrypted {
-			fsInfo, configName, _, err := fs.ParseRemote(args[0])
+			fsInfo, _, _, config, err := fs.ConfigFs(args[0])
 			if err != nil {
 				log.Fatalf(err.Error())
 			}
 			if fsInfo.Name != "crypt" {
 				log.Fatalf("The remote needs to be of type \"crypt\"")
 			}
-			cipher, err = crypt.NewCipher(configName)
+			cipher, err = crypt.NewCipher(config)
 			if err != nil {
 				log.Fatalf(err.Error())
 			}
diff --git a/fs/config.go b/fs/config.go
index 99b83fc26..062394f5d 100644
--- a/fs/config.go
+++ b/fs/config.go
@@ -17,6 +17,14 @@ var (
 	// implementation from the fs
 	ConfigFileGet = func(section, key string) (string, bool) { return "", false }

+	// Set a value into the config file
+	//
+	// This is a function pointer to decouple the config
+	// implementation from the fs
+	ConfigFileSet = func(section, key, value string) {
+		Errorf(nil, "No config handler to set %q = %q in section %q of the config file", key, value, section)
+	}
+
 	// CountError counts an error.  If any errors have been
 	// counted then it will exit with a non zero error code.
 	//
diff --git a/fs/config/config.go b/fs/config/config.go
index 1f7e1d531..8c28577e2 100644
--- a/fs/config/config.go
+++ b/fs/config/config.go
@@ -81,8 +81,9 @@ var (
 )

 func init() {
-	// Set the function pointer up in fs
+	// Set the function pointers up in fs
 	fs.ConfigFileGet = FileGetFlag
+	fs.ConfigFileSet = FileSet
 }

 func getConfigData() *goconfig.ConfigFile {
@@ -705,7 +706,8 @@ func RemoteConfig(name string) {
 	fmt.Printf("Remote config\n")
 	f := MustFindByName(name)
 	if f.Config != nil {
-		f.Config(name)
+		m := fs.ConfigMap(f, name)
+		f.Config(name, m)
 	}
 }

@@ -745,7 +747,7 @@ func ChooseOption(o *fs.Option, name string) string {
 	fmt.Println(o.Help)
 	if o.IsPassword {
 		actions := []string{"yYes type in my own password", "gGenerate random password"}
-		if o.Optional {
+		if !o.Required {
 			actions = append(actions, "nNo leave this optional password blank")
 		}
 		var password string
@@ -1089,8 +1091,8 @@ func Authorize(args []string) {
 		log.Fatalf("Invalid number of arguments: %d", len(args))
 	}
 	newType := args[0]
-	fs := fs.MustFind(newType)
-	if fs.Config == nil {
+	f := fs.MustFind(newType)
+	if f.Config == nil {
 		log.Fatalf("Can't authorize fs %q", newType)
 	}
 	// Name used for temporary fs
@@ -1105,20 +1107,15 @@ func Authorize(args []string) {
 		getConfigData().SetValue(name, ConfigClientID, args[1])
 		getConfigData().SetValue(name, ConfigClientSecret, args[2])
 	}
-	fs.Config(name)
+	m := fs.ConfigMap(f, name)
+	f.Config(name, m)
 }

 // FileGetFlag gets the config key under section returning the
 // the value and true if found and or ("", false) otherwise
-//
-// It looks up defaults in the environment if they are present
 func FileGetFlag(section, key string) (string, bool) {
 	newValue, err := getConfigData().GetValue(section, key)
-	if err == nil {
-		return newValue, true
-	}
-	envKey := fs.ConfigToEnv(section, key)
-	return os.LookupEnv(envKey)
+	return newValue, err == nil
 }

 // FileGet gets the config key under section returning the
@@ -1134,46 +1131,14 @@ func FileGet(section, key string, defaultVal ...string) string {
 	return getConfigData().MustValue(section, key, defaultVal...)
 }

-// FileGetBool gets the config key under section returning the
-// default or false if not set.
-//
-// It looks up defaults in the environment if they are present
-func FileGetBool(section, key string, defaultVal ...bool) bool {
-	envKey := fs.ConfigToEnv(section, key)
-	newValue, found := os.LookupEnv(envKey)
-	if found {
-		newBool, err := strconv.ParseBool(newValue)
-		if err != nil {
-			fs.Errorf(nil, "Couldn't parse %q into bool - ignoring: %v", envKey, err)
-		} else {
-			defaultVal = []bool{newBool}
-		}
-	}
-	return getConfigData().MustBool(section, key, defaultVal...)
-}
-
-// FileGetInt gets the config key under section returning the
-// default or 0 if not set.
-//
-// It looks up defaults in the environment if they are present
-func FileGetInt(section, key string, defaultVal ...int) int {
-	envKey := fs.ConfigToEnv(section, key)
-	newValue, found := os.LookupEnv(envKey)
-	if found {
-		newInt, err := strconv.Atoi(newValue)
-		if err != nil {
-			fs.Errorf(nil, "Couldn't parse %q into int - ignoring: %v", envKey, err)
-		} else {
-			defaultVal = []int{newInt}
-		}
-	}
-	return getConfigData().MustInt(section, key, defaultVal...)
-}
-
 // FileSet sets the key in section to value.  It doesn't save
 // the config file.
 func FileSet(section, key, value string) {
-	getConfigData().SetValue(section, key, value)
+	if value != "" {
+		getConfigData().SetValue(section, key, value)
+	} else {
+		FileDeleteKey(section, key)
+	}
 }

 // FileDeleteKey deletes the config key in the config file.
diff --git a/fs/fs.go b/fs/fs.go
index 793b20bd6..f6befdcd7 100644
--- a/fs/fs.go
+++ b/fs/fs.go
@@ -2,6 +2,7 @@ package fs

 import (
+	"fmt"
 	"io"
 	"io/ioutil"
 	"log"
@@ -13,6 +14,8 @@ import (
 	"strings"
 	"time"

+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/config/configstruct"
 	"github.com/ncw/rclone/fs/fspath"
 	"github.com/ncw/rclone/fs/hash"
 	"github.com/pkg/errors"
@@ -68,24 +71,87 @@ type RegInfo struct {
 	Name string
 	// Description of this fs - defaults to Name
 	Description string
+	// Prefix for command line flags for this fs - defaults to Name if not set
+	Prefix string
 	// Create a new file system.  If root refers to an existing
 	// object, then it should return a Fs which which points to
 	// the parent of that object and ErrorIsFile.
-	NewFs func(name string, root string) (Fs, error) `json:"-"`
+	NewFs func(name string, root string, config configmap.Mapper) (Fs, error) `json:"-"`
 	// Function to call to help with config
-	Config func(string) `json:"-"`
+	Config func(name string, config configmap.Mapper) `json:"-"`
 	// Options for the Fs configuration
-	Options []Option
+	Options Options
 }

+// Options is a slice of configuration Option for a backend
+type Options []Option
+
+// Set the default values for the options
+func (os Options) setValues() {
+	for i := range os {
+		o := &os[i]
+		if o.Default == nil {
+			o.Default = ""
+		}
+	}
+}
+
+// OptionVisibility controls whether the options are visible in the
+// configurator or the command line.
+type OptionVisibility byte
+
+// Constants Option.Hide
+const (
+	OptionHideCommandLine OptionVisibility = 1 << iota
+	OptionHideConfigurator
+	OptionHideBoth = OptionHideCommandLine | OptionHideConfigurator
+)
+
 // Option is describes an option for the config wizard
+//
+// This also describes command line options and environment variables
 type Option struct {
-	Name       string
-	Help       string
-	Provider   string
-	Optional   bool
-	IsPassword bool
-	Examples   OptionExamples `json:",omitempty"`
+	Name       string           // name of the option in snake_case
+	Help       string           // Help, the first line only is used for the command line help
+	Provider   string           // Set to filter on provider
+	Default    interface{}      // default value, nil => ""
+	Value      interface{}      // value to be set by flags
+	Examples   OptionExamples   `json:",omitempty"` // config examples
+	ShortOpt   string           // the short option for this if required
+	Hide       OptionVisibility // set this to hide the config from the configurator or the command line
+	Required   bool             // this option is required
+	IsPassword bool             // set if the option is a password
+	NoPrefix   bool             // set if the option for this should not use the backend prefix
+	Advanced   bool             // set if this is an advanced config option
+}
+
+// Gets the current value, which is the default if not set
+func (o *Option) value() interface{} {
+	val := o.Value
+	if val == nil {
+		val = o.Default
+	}
+	return val
+}
+
+// String turns Option into a string
+func (o *Option) String() string {
+	return fmt.Sprint(o.value())
+}
+
+// Set an Option from a string
+func (o *Option) Set(s string) (err error) {
+	newValue, err := configstruct.StringToInterface(o.value(), s)
+	if err != nil {
+		return err
+	}
+	o.Value = newValue
+	return nil
+}
+
+// Type of the value
+func (o *Option) Type() string {
+	return reflect.TypeOf(o.value()).Name()
 }

 // OptionExamples is a slice of examples
@@ -114,6 +180,10 @@ type OptionExample struct {
 //
 // Fs modules should use this in an init() function
 func Register(info *RegInfo) {
+	info.Options.setValues()
+	if info.Prefix == "" {
+		info.Prefix = info.Name
+	}
 	Registry = append(Registry, info)
 }

@@ -792,7 +862,8 @@ func ParseRemote(path string) (fsInfo *RegInfo, configName, fsPath string, err e
 	var fsName string
 	var ok bool
 	if configName != "" {
-		fsName, ok = ConfigFileGet(configName, "type")
+		m := ConfigMap(nil, configName)
+		fsName, ok = m.Get("type")
 		if !ok {
 			return nil, "", "", ErrorNotFoundInConfigFile
 		}
@@ -804,6 +875,119 @@ func ParseRemote(path string) (fsInfo *RegInfo, configName, fsPath string, err e
 	return fsInfo, configName, fsPath, err
 }

+// A configmap.Getter to read from the environment RCLONE_CONFIG_backend_option_name
+type configEnvVars string
+
+// Get a config item from the environment variables if possible
+func (configName configEnvVars) Get(key string) (value string, ok bool) {
+	return os.LookupEnv(ConfigToEnv(string(configName), key))
+}
+
+// A configmap.Getter to read from the environment RCLONE_option_name
+type optionEnvVars string
+
+// Get a config item from the option environment variables if possible
+func (prefix optionEnvVars) Get(key string) (value string, ok bool) {
+	return os.LookupEnv(OptionToEnv(string(prefix) + "-" + key))
+}
+
+// A configmap.Getter to read either the default value or the set
+// value from the RegInfo.Options
+type regInfoValues struct {
+	fsInfo     *RegInfo
+	useDefault bool
+}
+
+// override the values in configMap with either the flag values or
+// the default values
+func (r *regInfoValues) Get(key string) (value string, ok bool) {
+
+// A configmap.Getter to read either the default value or the set
+// value from the RegInfo.Options
+type regInfoValues struct {
+	fsInfo     *RegInfo
+	useDefault bool
+}
+
+// override the values in configMap with either the flag values or
+// the default values
+func (r *regInfoValues) Get(key string) (value string, ok bool) {
+	for i := range r.fsInfo.Options {
+		o := &r.fsInfo.Options[i]
+		if o.Name == key {
+			if r.useDefault || o.Value != nil {
+				return o.String(), true
+			}
+			break
+		}
+	}
+	return "", false
+}
+
+// A configmap.Setter to write to the config file
+type setConfigFile string
+
+// Set a config item into the config file
+func (section setConfigFile) Set(key, value string) {
+	Debugf(nil, "Saving config %q = %q in section %q of the config file", key, value, section)
+	ConfigFileSet(string(section), key, value)
+}
+
+// A configmap.Getter to read from the config file
+type getConfigFile string
+
+// Get a config item from the config file
+func (section getConfigFile) Get(key string) (value string, ok bool) {
+	value, ok = ConfigFileGet(string(section), key)
+	// Ignore empty lines in the config file
+	if value == "" {
+		ok = false
+	}
+	return value, ok
+}
+
+// ConfigMap creates a configmap.Map from the *RegInfo and the
+// configName passed in.
+//
+// If fsInfo is nil then the returned configmap.Map should only be
+// used for reading non-backend-specific parameters, such as "type".
+func ConfigMap(fsInfo *RegInfo, configName string) (config *configmap.Map) {
+	// Create the config
+	config = configmap.New()
+
+	// Read the config, most specific to least specific
+
+	// flag values
+	if fsInfo != nil {
+		config.AddGetter(&regInfoValues{fsInfo, false})
+	}
+
+	// remote specific environment vars
+	config.AddGetter(configEnvVars(configName))
+
+	// backend specific environment vars
+	if fsInfo != nil {
+		config.AddGetter(optionEnvVars(fsInfo.Prefix))
+	}
+
+	// config file
+	config.AddGetter(getConfigFile(configName))
+
+	// default values
+	if fsInfo != nil {
+		config.AddGetter(&regInfoValues{fsInfo, true})
+	}
+
+	// Setter - writes go back to the config file
+	config.AddSetter(setConfigFile(configName))
+	return config
+}
+
+// ConfigFs makes the config for calling NewFs with.
+//
+// It parses the path which is of the form remote:path
+//
+// Remotes are looked up in the config file. If the remote isn't
+// found then ErrorNotFoundInConfigFile will be returned.
+func ConfigFs(path string) (fsInfo *RegInfo, configName, fsPath string, config *configmap.Map, err error) {
+	// Parse the remote path
+	fsInfo, configName, fsPath, err = ParseRemote(path)
+	if err != nil {
+		return
+	}
+	config = ConfigMap(fsInfo, configName)
+	return
+}
+
 // NewFs makes a new Fs object from the path
 //
 // The path is of the form remote:path
@@ -814,11 +998,11 @@ func ParseRemote(path string) (fsInfo *RegInfo, configName, fsPath string, err e
 // On Windows avoid single character remote names as they can be mixed
 // up with drive letters.
 func NewFs(path string) (Fs, error) {
-	fsInfo, configName, fsPath, err := ParseRemote(path)
+	fsInfo, configName, fsPath, config, err := ConfigFs(path)
 	if err != nil {
 		return nil, err
 	}
-	return fsInfo.NewFs(configName, fsPath)
+	return fsInfo.NewFs(configName, fsPath, config)
 }

 // TemporaryLocalFs creates a local FS in the OS's temporary directory.
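As a usage illustration (not part of the diff), the following minimal program sketches how ConfigMap might be consulted on its own; the remote name "myremote" is hypothetical, and with a nil *RegInfo only non-backend-specific keys such as "type" can be resolved, from the RCLONE_CONFIG_MYREMOTE_* environment variables or the config file:

package main

import (
	"fmt"

	"github.com/ncw/rclone/fs"
)

func main() {
	// Getters are consulted most specific first: environment
	// variables for the remote, then its config file section.
	m := fs.ConfigMap(nil, "myremote")
	if remoteType, ok := m.Get("type"); ok {
		fmt.Printf("myremote has type %q\n", remoteType)
	}
}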
diff --git a/fs/fs_test.go b/fs/fs_test.go index 10b0fe84f..753614b0b 100644 --- a/fs/fs_test.go +++ b/fs/fs_test.go @@ -4,6 +4,7 @@ import ( "strings" "testing" + "github.com/spf13/pflag" "github.com/stretchr/testify/assert" ) @@ -52,3 +53,20 @@ func TestFeaturesDisableList(t *testing.T) { assert.False(t, ft.CaseInsensitive) assert.False(t, ft.DuplicateFiles) } + +// Check it satisfies the interface +var _ pflag.Value = (*Option)(nil) + +func TestOption(t *testing.T) { + d := &Option{ + Name: "potato", + Value: SizeSuffix(17 << 20), + } + assert.Equal(t, "17M", d.String()) + assert.Equal(t, "SizeSuffix", d.Type()) + err := d.Set("18M") + assert.NoError(t, err) + assert.Equal(t, SizeSuffix(18<<20), d.Value) + err = d.Set("sdfsdf") + assert.Error(t, err) +} diff --git a/lib/oauthutil/oauthutil.go b/lib/oauthutil/oauthutil.go index 0f146a477..777297b82 100644 --- a/lib/oauthutil/oauthutil.go +++ b/lib/oauthutil/oauthutil.go @@ -15,6 +15,7 @@ import ( "github.com/ncw/rclone/fs" "github.com/ncw/rclone/fs/config" + "github.com/ncw/rclone/fs/config/configmap" "github.com/ncw/rclone/fs/fshttp" "github.com/pkg/errors" "github.com/skratchdot/open-golang/open" @@ -85,9 +86,9 @@ type oldToken struct { // GetToken returns the token saved in the config file under // section name. -func GetToken(name string) (*oauth2.Token, error) { - tokenString := config.FileGet(name, config.ConfigToken) - if tokenString == "" { +func GetToken(name string, m configmap.Mapper) (*oauth2.Token, error) { + tokenString, ok := m.Get(config.ConfigToken) + if !ok || tokenString == "" { return nil, errors.New("empty token found - please run rclone config again") } token := new(oauth2.Token) @@ -110,7 +111,7 @@ func GetToken(name string) (*oauth2.Token, error) { token.RefreshToken = oldtoken.RefreshToken token.Expiry = oldtoken.Expiry // Save new format in config file - err = PutToken(name, token, false) + err = PutToken(name, m, token, false) if err != nil { return nil, err } @@ -120,14 +121,14 @@ func GetToken(name string) (*oauth2.Token, error) { // PutToken stores the token in the config file // // This saves the config file if it changes -func PutToken(name string, token *oauth2.Token, newSection bool) error { +func PutToken(name string, m configmap.Mapper, token *oauth2.Token, newSection bool) error { tokenBytes, err := json.Marshal(token) if err != nil { return err } tokenString := string(tokenBytes) - old := config.FileGet(name, config.ConfigToken) - if tokenString != old { + old, ok := m.Get(config.ConfigToken) + if !ok || tokenString != old { err = config.SetValueAndSave(name, config.ConfigToken, tokenString) if newSection && err != nil { fs.Debugf(name, "Added new token to config, still needs to be saved") @@ -144,6 +145,7 @@ func PutToken(name string, token *oauth2.Token, newSection bool) error { type TokenSource struct { mu sync.Mutex name string + m configmap.Mapper tokenSource oauth2.TokenSource token *oauth2.Token config *oauth2.Config @@ -176,7 +178,7 @@ func (ts *TokenSource) Token() (*oauth2.Token, error) { if ts.expiryTimer != nil { ts.expiryTimer.Reset(ts.timeToExpiry()) } - err = PutToken(ts.name, token, false) + err = PutToken(ts.name, ts.m, token, false) if err != nil { return nil, err } @@ -229,27 +231,27 @@ func Context(client *http.Client) context.Context { // config file if they are not blank. // If any value is overridden, true is returned. 
// the origConfig is copied -func overrideCredentials(name string, origConfig *oauth2.Config) (newConfig *oauth2.Config, changed bool) { +func overrideCredentials(name string, m configmap.Mapper, origConfig *oauth2.Config) (newConfig *oauth2.Config, changed bool) { newConfig = new(oauth2.Config) *newConfig = *origConfig changed = false - ClientID := config.FileGet(name, config.ConfigClientID) - if ClientID != "" { + ClientID, ok := m.Get(config.ConfigClientID) + if ok && ClientID != "" { newConfig.ClientID = ClientID changed = true } - ClientSecret := config.FileGet(name, config.ConfigClientSecret) - if ClientSecret != "" { + ClientSecret, ok := m.Get(config.ConfigClientSecret) + if ok && ClientSecret != "" { newConfig.ClientSecret = ClientSecret changed = true } - AuthURL := config.FileGet(name, config.ConfigAuthURL) - if AuthURL != "" { + AuthURL, ok := m.Get(config.ConfigAuthURL) + if ok && AuthURL != "" { newConfig.Endpoint.AuthURL = AuthURL changed = true } - TokenURL := config.FileGet(name, config.ConfigTokenURL) - if TokenURL != "" { + TokenURL, ok := m.Get(config.ConfigTokenURL) + if ok && TokenURL != "" { newConfig.Endpoint.TokenURL = TokenURL changed = true } @@ -260,9 +262,9 @@ func overrideCredentials(name string, origConfig *oauth2.Config) (newConfig *oau // configures a Client with it. It returns the client and a // TokenSource which Invalidate may need to be called on. It uses the // httpClient passed in as the base client. -func NewClientWithBaseClient(name string, config *oauth2.Config, baseClient *http.Client) (*http.Client, *TokenSource, error) { - config, _ = overrideCredentials(name, config) - token, err := GetToken(name) +func NewClientWithBaseClient(name string, m configmap.Mapper, config *oauth2.Config, baseClient *http.Client) (*http.Client, *TokenSource, error) { + config, _ = overrideCredentials(name, m, config) + token, err := GetToken(name, m) if err != nil { return nil, nil, err } @@ -274,6 +276,7 @@ func NewClientWithBaseClient(name string, config *oauth2.Config, baseClient *htt // tokens in the config file ts := &TokenSource{ name: name, + m: m, token: token, config: config, ctx: ctx, @@ -284,36 +287,37 @@ func NewClientWithBaseClient(name string, config *oauth2.Config, baseClient *htt // NewClient gets a token from the config file and configures a Client // with it. It returns the client and a TokenSource which Invalidate may need to be called on -func NewClient(name string, oauthConfig *oauth2.Config) (*http.Client, *TokenSource, error) { - return NewClientWithBaseClient(name, oauthConfig, fshttp.NewClient(fs.Config)) +func NewClient(name string, m configmap.Mapper, oauthConfig *oauth2.Config) (*http.Client, *TokenSource, error) { + return NewClientWithBaseClient(name, m, oauthConfig, fshttp.NewClient(fs.Config)) } // Config does the initial creation of the token // // It may run an internal webserver to receive the results -func Config(id, name string, config *oauth2.Config, opts ...oauth2.AuthCodeOption) error { - return doConfig(id, name, nil, config, true, opts) +func Config(id, name string, m configmap.Mapper, config *oauth2.Config, opts ...oauth2.AuthCodeOption) error { + return doConfig(id, name, m, nil, config, true, opts) } // ConfigNoOffline does the same as Config but does not pass the // "access_type=offline" parameter. 
-func ConfigNoOffline(id, name string, config *oauth2.Config, opts ...oauth2.AuthCodeOption) error {
-	return doConfig(id, name, nil, config, false, opts)
+func ConfigNoOffline(id, name string, m configmap.Mapper, config *oauth2.Config, opts ...oauth2.AuthCodeOption) error {
+	return doConfig(id, name, m, nil, config, false, opts)
 }

 // ConfigErrorCheck does the same as Config, but allows the backend to pass an error handling function
 // This function gets called with the request made to rclone as a parameter if no code was found
-func ConfigErrorCheck(id, name string, errorHandler func(*http.Request) AuthError, config *oauth2.Config, opts ...oauth2.AuthCodeOption) error {
-	return doConfig(id, name, errorHandler, config, true, opts)
+func ConfigErrorCheck(id, name string, m configmap.Mapper, errorHandler func(*http.Request) AuthError, config *oauth2.Config, opts ...oauth2.AuthCodeOption) error {
+	return doConfig(id, name, m, errorHandler, config, true, opts)
 }

-func doConfig(id, name string, errorHandler func(*http.Request) AuthError, oauthConfig *oauth2.Config, offline bool, opts []oauth2.AuthCodeOption) error {
-	oauthConfig, changed := overrideCredentials(name, oauthConfig)
-	automatic := config.FileGet(name, config.ConfigAutomatic) != ""
+func doConfig(id, name string, m configmap.Mapper, errorHandler func(*http.Request) AuthError, oauthConfig *oauth2.Config, offline bool, opts []oauth2.AuthCodeOption) error {
+	oauthConfig, changed := overrideCredentials(name, m, oauthConfig)
+	auto, ok := m.Get(config.ConfigAutomatic)
+	automatic := ok && auto != ""

 	// See if already have a token
-	tokenString := config.FileGet(name, "token")
-	if tokenString != "" {
+	tokenString, ok := m.Get("token")
+	if ok && tokenString != "" {
 		fmt.Printf("Already have a token - refresh?\n")
 		if !config.Confirm() {
 			return nil
@@ -354,7 +358,7 @@ func doConfig(id, name string, errorHandler func(*http.Request) AuthError, oauth
 			if err != nil {
 				return err
 			}
-			return PutToken(name, token, false)
+			return PutToken(name, m, token, false)
 		}
 	case TitleBarRedirectURL:
 		useWebServer = automatic
@@ -436,7 +440,7 @@ func doConfig(id, name string, errorHandler func(*http.Request) AuthError, oauth
 		}
 		fmt.Printf("Paste the following into your remote machine --->\n%s\n<---End paste", result)
 	}
-	return PutToken(name, token, true)
+	return PutToken(name, m, token, true)
 }

 // Local web server for collecting auth
diff --git a/rclone.go b/rclone.go
index f476ba44a..955f89169 100644
--- a/rclone.go
+++ b/rclone.go
@@ -4,8 +4,6 @@
 package main

 import (
-	"log"
-
 	"github.com/ncw/rclone/cmd"

 	_ "github.com/ncw/rclone/backend/all" // import all backends
@@ -13,7 +11,5 @@ import (
 )

 func main() {
-	if err := cmd.Root.Execute(); err != nil {
-		log.Fatalf("Fatal error: %v", err)
-	}
+	cmd.Main()
 }
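Taken together, the oauthutil changes thread the configmap.Mapper from a backend's NewFs all the way down to token storage. A minimal sketch of how an OAuth backend might adopt the new signatures; the package name mybackend, the empty oauthConfig, and the elided Fs construction are hypothetical placeholders, not rclone API:

package mybackend

import (
	"errors"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/lib/oauthutil"
	"golang.org/x/oauth2"
)

// oauthConfig is a placeholder; a real backend fills in its endpoint,
// client ID/secret and scopes.
var oauthConfig = &oauth2.Config{}

// NewFs demonstrates the new calling convention: the Mapper handed to
// NewFs is passed on to oauthutil, which now reads the client ID,
// secret and token through it (flags, environment or config file)
// rather than via config.FileGet.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	client, ts, err := oauthutil.NewClient(name, m, oauthConfig)
	if err != nil {
		return nil, err
	}
	_ = client // authenticated *http.Client for the backend's API calls
	_ = ts     // keep the *TokenSource if Invalidate may be needed
	return nil, errors.New("mybackend: Fs construction elided in this sketch")
}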