docs: punctuation cleanup

See #5538
This commit is contained in:
albertony 2021-08-16 11:30:01 +02:00
parent b868561951
commit e2f47ecdeb
84 changed files with 1084 additions and 1052 deletions

View File

@ -20,7 +20,7 @@ func init() {
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "remote", Name: "remote",
Help: "Remote or path to alias.\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".", Help: "Remote or path to alias.\n\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
Required: true, Required: true,
}}, }},
} }

View File

@ -75,7 +75,7 @@ func init() {
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "account", Name: "account",
Help: "Storage Account Name (leave blank to use SAS URL or Emulator)", Help: "Storage Account Name.\n\nLeave blank to use SAS URL or Emulator.",
}, { }, {
Name: "service_principal_file", Name: "service_principal_file",
Help: `Path to file containing credentials for use with a service principal. Help: `Path to file containing credentials for use with a service principal.
@ -91,13 +91,13 @@ See ["Create an Azure service principal"](https://docs.microsoft.com/en-us/cli/a
`, `,
}, { }, {
Name: "key", Name: "key",
Help: "Storage Account Key (leave blank to use SAS URL or Emulator)", Help: "Storage Account Key.\n\nLeave blank to use SAS URL or Emulator.",
}, { }, {
Name: "sas_url", Name: "sas_url",
Help: "SAS URL for container level access only\n(leave blank if using account/key or Emulator)", Help: "SAS URL for container level access only.\n\nLeave blank if using account/key or Emulator.",
}, { }, {
Name: "use_msi", Name: "use_msi",
Help: `Use a managed service identity to authenticate (only works in Azure) Help: `Use a managed service identity to authenticate (only works in Azure).
When true, use a [managed service identity](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/) When true, use a [managed service identity](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/)
to authenticate to Azure Storage instead of a SAS token or account key. to authenticate to Azure Storage instead of a SAS token or account key.
@ -110,27 +110,27 @@ msi_client_id, or msi_mi_res_id parameters.`,
Default: false, Default: false,
}, { }, {
Name: "msi_object_id", Name: "msi_object_id",
Help: "Object ID of the user-assigned MSI to use, if any. Leave blank if msi_client_id or msi_mi_res_id specified.", Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_mi_res_id specified.",
Advanced: true, Advanced: true,
}, { }, {
Name: "msi_client_id", Name: "msi_client_id",
Help: "Object ID of the user-assigned MSI to use, if any. Leave blank if msi_object_id or msi_mi_res_id specified.", Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_object_id or msi_mi_res_id specified.",
Advanced: true, Advanced: true,
}, { }, {
Name: "msi_mi_res_id", Name: "msi_mi_res_id",
Help: "Azure resource ID of the user-assigned MSI to use, if any. Leave blank if msi_client_id or msi_object_id specified.", Help: "Azure resource ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_object_id specified.",
Advanced: true, Advanced: true,
}, { }, {
Name: "use_emulator", Name: "use_emulator",
Help: "Uses local storage emulator if provided as 'true' (leave blank if using real azure storage endpoint)", Help: "Uses local storage emulator if provided as 'true'.\n\nLeave blank if using real azure storage endpoint.",
Default: false, Default: false,
}, { }, {
Name: "endpoint", Name: "endpoint",
Help: "Endpoint for the service\nLeave blank normally.", Help: "Endpoint for the service.\n\nLeave blank normally.",
Advanced: true, Advanced: true,
}, { }, {
Name: "upload_cutoff", Name: "upload_cutoff",
Help: "Cutoff for switching to chunked upload (<= 256 MiB). (Deprecated)", Help: "Cutoff for switching to chunked upload (<= 256 MiB) (deprecated).",
Advanced: true, Advanced: true,
}, { }, {
Name: "chunk_size", Name: "chunk_size",
@ -201,6 +201,7 @@ to start uploading.`,
Default: memoryPoolFlushTime, Default: memoryPoolFlushTime,
Advanced: true, Advanced: true,
Help: `How often internal memory buffer pools will be flushed. Help: `How often internal memory buffer pools will be flushed.
Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
This option controls how often unused buffers will be removed from the pool.`, This option controls how often unused buffers will be removed from the pool.`,
}, { }, {
@ -220,12 +221,12 @@ This option controls how often unused buffers will be removed from the pool.`,
encoder.EncodeRightPeriod), encoder.EncodeRightPeriod),
}, { }, {
Name: "public_access", Name: "public_access",
Help: "Public access level of a container: blob, container.", Help: "Public access level of a container: blob or container.",
Default: string(azblob.PublicAccessNone), Default: string(azblob.PublicAccessNone),
Examples: []fs.OptionExample{ Examples: []fs.OptionExample{
{ {
Value: string(azblob.PublicAccessNone), Value: string(azblob.PublicAccessNone),
Help: "The container and its blobs can be accessed only with an authorized request. It's a default value", Help: "The container and its blobs can be accessed only with an authorized request.\nIt's a default value.",
}, { }, {
Value: string(azblob.PublicAccessBlob), Value: string(azblob.PublicAccessBlob),
Help: "Blob data within this container can be read via anonymous request.", Help: "Blob data within this container can be read via anonymous request.",

View File

@ -75,15 +75,15 @@ func init() {
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "account", Name: "account",
Help: "Account ID or Application Key ID", Help: "Account ID or Application Key ID.",
Required: true, Required: true,
}, { }, {
Name: "key", Name: "key",
Help: "Application Key", Help: "Application Key.",
Required: true, Required: true,
}, { }, {
Name: "endpoint", Name: "endpoint",
Help: "Endpoint for the service.\nLeave blank normally.", Help: "Endpoint for the service.\n\nLeave blank normally.",
Advanced: true, Advanced: true,
}, { }, {
Name: "test_mode", Name: "test_mode",
@ -103,7 +103,7 @@ in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration
Advanced: true, Advanced: true,
}, { }, {
Name: "versions", Name: "versions",
Help: "Include old versions in directory listings.\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.", Help: "Include old versions in directory listings.\n\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
Default: false, Default: false,
Advanced: true, Advanced: true,
}, { }, {
@ -121,7 +121,7 @@ This value should be set no larger than 4.657 GiB (== 5 GB).`,
Advanced: true, Advanced: true,
}, { }, {
Name: "copy_cutoff", Name: "copy_cutoff",
Help: `Cutoff for switching to multipart copy Help: `Cutoff for switching to multipart copy.
Any files larger than this that need to be server-side copied will be Any files larger than this that need to be server-side copied will be
copied in chunks of this size. copied in chunks of this size.
@ -131,17 +131,19 @@ The minimum is 0 and the maximum is 4.6 GiB.`,
Advanced: true, Advanced: true,
}, { }, {
Name: "chunk_size", Name: "chunk_size",
Help: `Upload chunk size. Must fit in memory. Help: `Upload chunk size.
When uploading large files, chunk the file into this size. Note that When uploading large files, chunk the file into this size.
these chunks are buffered in memory and there might a maximum of
"--transfers" chunks in progress at once. 5,000,000 Bytes is the Must fit in memory. These chunks are buffered in memory and there
minimum size.`, might a maximum of "--transfers" chunks in progress at once.
5,000,000 Bytes is the minimum size.`,
Default: defaultChunkSize, Default: defaultChunkSize,
Advanced: true, Advanced: true,
}, { }, {
Name: "disable_checksum", Name: "disable_checksum",
Help: `Disable checksums for large (> upload cutoff) files Help: `Disable checksums for large (> upload cutoff) files.
Normally rclone will calculate the SHA1 checksum of the input before Normally rclone will calculate the SHA1 checksum of the input before
uploading it so it can add it to metadata on the object. This is great uploading it so it can add it to metadata on the object. This is great

View File

@ -110,19 +110,19 @@ func init() {
Advanced: true, Advanced: true,
}, { }, {
Name: "box_config_file", Name: "box_config_file",
Help: "Box App config.json location\nLeave blank normally." + env.ShellExpandHelp, Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp,
}, { }, {
Name: "access_token", Name: "access_token",
Help: "Box App Primary Access Token\nLeave blank normally.", Help: "Box App Primary Access Token\n\nLeave blank normally.",
}, { }, {
Name: "box_sub_type", Name: "box_sub_type",
Default: "user", Default: "user",
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "user", Value: "user",
Help: "Rclone should act on behalf of a user", Help: "Rclone should act on behalf of a user.",
}, { }, {
Value: "enterprise", Value: "enterprise",
Help: "Rclone should act on behalf of a service account", Help: "Rclone should act on behalf of a service account.",
}}, }},
}, { }, {
Name: "upload_cutoff", Name: "upload_cutoff",

View File

@ -69,26 +69,26 @@ func init() {
CommandHelp: commandHelp, CommandHelp: commandHelp,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "remote", Name: "remote",
Help: "Remote to cache.\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).", Help: "Remote to cache.\n\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Required: true, Required: true,
}, { }, {
Name: "plex_url", Name: "plex_url",
Help: "The URL of the Plex server", Help: "The URL of the Plex server.",
}, { }, {
Name: "plex_username", Name: "plex_username",
Help: "The username of the Plex user", Help: "The username of the Plex user.",
}, { }, {
Name: "plex_password", Name: "plex_password",
Help: "The password of the Plex user", Help: "The password of the Plex user.",
IsPassword: true, IsPassword: true,
}, { }, {
Name: "plex_token", Name: "plex_token",
Help: "The plex token for authentication - auto set normally", Help: "The plex token for authentication - auto set normally.",
Hide: fs.OptionHideBoth, Hide: fs.OptionHideBoth,
Advanced: true, Advanced: true,
}, { }, {
Name: "plex_insecure", Name: "plex_insecure",
Help: "Skip all certificate verification when connecting to the Plex server", Help: "Skip all certificate verification when connecting to the Plex server.",
Advanced: true, Advanced: true,
}, { }, {
Name: "chunk_size", Name: "chunk_size",
@ -144,7 +144,7 @@ oldest chunks until it goes under this value.`,
}, { }, {
Name: "db_path", Name: "db_path",
Default: filepath.Join(config.GetCacheDir(), "cache-backend"), Default: filepath.Join(config.GetCacheDir(), "cache-backend"),
Help: "Directory to store file structure metadata DB.\nThe remote name is used as the DB file name.", Help: "Directory to store file structure metadata DB.\n\nThe remote name is used as the DB file name.",
Advanced: true, Advanced: true,
}, { }, {
Name: "chunk_path", Name: "chunk_path",
@ -168,6 +168,7 @@ then "--cache-chunk-path" will use the same path as "--cache-db-path".`,
Name: "chunk_clean_interval", Name: "chunk_clean_interval",
Default: DefCacheChunkCleanInterval, Default: DefCacheChunkCleanInterval,
Help: `How often should the cache perform cleanups of the chunk storage. Help: `How often should the cache perform cleanups of the chunk storage.
The default value should be ok for most people. If you find that the The default value should be ok for most people. If you find that the
cache goes over "cache-chunk-total-size" too often then try to lower cache goes over "cache-chunk-total-size" too often then try to lower
this value to force it to perform cleanups more often.`, this value to force it to perform cleanups more often.`,
@ -221,7 +222,7 @@ available on the local machine.`,
}, { }, {
Name: "rps", Name: "rps",
Default: int(DefCacheRps), Default: int(DefCacheRps),
Help: `Limits the number of requests per second to the source FS (-1 to disable) Help: `Limits the number of requests per second to the source FS (-1 to disable).
This setting places a hard limit on the number of requests per second This setting places a hard limit on the number of requests per second
that cache will be doing to the cloud provider remote and try to that cache will be doing to the cloud provider remote and try to
@ -242,7 +243,7 @@ still pass.`,
}, { }, {
Name: "writes", Name: "writes",
Default: DefCacheWrites, Default: DefCacheWrites,
Help: `Cache file data on writes through the FS Help: `Cache file data on writes through the FS.
If you need to read files immediately after you upload them through If you need to read files immediately after you upload them through
cache you can enable this flag to have their data stored in the cache you can enable this flag to have their data stored in the
@ -263,7 +264,7 @@ provider`,
}, { }, {
Name: "tmp_wait_time", Name: "tmp_wait_time",
Default: DefCacheTmpWaitTime, Default: DefCacheTmpWaitTime,
Help: `How long should files be stored in local cache before being uploaded Help: `How long should files be stored in local cache before being uploaded.
This is the duration that a file must wait in the temporary location This is the duration that a file must wait in the temporary location
_cache-tmp-upload-path_ before it is selected for upload. _cache-tmp-upload-path_ before it is selected for upload.
@ -274,7 +275,7 @@ to start the upload if a queue formed for this purpose.`,
}, { }, {
Name: "db_wait_time", Name: "db_wait_time",
Default: DefCacheDbWaitTime, Default: DefCacheDbWaitTime,
Help: `How long to wait for the DB to be available - 0 is unlimited Help: `How long to wait for the DB to be available - 0 is unlimited.
Only one process can have the DB open at any one time, so rclone waits Only one process can have the DB open at any one time, so rclone waits
for this duration for the DB to become available before it gives an for this duration for the DB to become available before it gives an

View File

@ -150,6 +150,7 @@ func init() {
Name: "remote", Name: "remote",
Required: true, Required: true,
Help: `Remote to chunk/unchunk. Help: `Remote to chunk/unchunk.
Normally should contain a ':' and a path, e.g. "myremote:path/to/dir", Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
"myremote:bucket" or maybe "myremote:" (not recommended).`, "myremote:bucket" or maybe "myremote:" (not recommended).`,
}, { }, {
@ -163,6 +164,7 @@ Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
Hide: fs.OptionHideCommandLine, Hide: fs.OptionHideCommandLine,
Default: `*.rclone_chunk.###`, Default: `*.rclone_chunk.###`,
Help: `String format of chunk file names. Help: `String format of chunk file names.
The two placeholders are: base file name (*) and chunk number (#...). The two placeholders are: base file name (*) and chunk number (#...).
There must be one and only one asterisk and one or more consecutive hash characters. There must be one and only one asterisk and one or more consecutive hash characters.
If chunk number has less digits than the number of hashes, it is left-padded by zeros. If chunk number has less digits than the number of hashes, it is left-padded by zeros.
@ -174,48 +176,57 @@ Possible chunk files are ignored if their name does not match given format.`,
Hide: fs.OptionHideCommandLine, Hide: fs.OptionHideCommandLine,
Default: 1, Default: 1,
Help: `Minimum valid chunk number. Usually 0 or 1. Help: `Minimum valid chunk number. Usually 0 or 1.
By default chunk numbers start from 1.`, By default chunk numbers start from 1.`,
}, { }, {
Name: "meta_format", Name: "meta_format",
Advanced: true, Advanced: true,
Hide: fs.OptionHideCommandLine, Hide: fs.OptionHideCommandLine,
Default: "simplejson", Default: "simplejson",
Help: `Format of the metadata object or "none". By default "simplejson". Help: `Format of the metadata object or "none".
By default "simplejson".
Metadata is a small JSON file named after the composite file.`, Metadata is a small JSON file named after the composite file.`,
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "none", Value: "none",
Help: `Do not use metadata files at all. Requires hash type "none".`, Help: `Do not use metadata files at all.
Requires hash type "none".`,
}, { }, {
Value: "simplejson", Value: "simplejson",
Help: `Simple JSON supports hash sums and chunk validation. Help: `Simple JSON supports hash sums and chunk validation.
It has the following fields: ver, size, nchunks, md5, sha1.`, It has the following fields: ver, size, nchunks, md5, sha1.`,
}}, }},
}, { }, {
Name: "hash_type", Name: "hash_type",
Advanced: false, Advanced: false,
Default: "md5", Default: "md5",
Help: `Choose how chunker handles hash sums. All modes but "none" require metadata.`, Help: `Choose how chunker handles hash sums.
All modes but "none" require metadata.`,
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "none", Value: "none",
Help: `Pass any hash supported by wrapped remote for non-chunked files, return nothing otherwise`, Help: `Pass any hash supported by wrapped remote for non-chunked files.
Return nothing otherwise.`,
}, { }, {
Value: "md5", Value: "md5",
Help: `MD5 for composite files`, Help: `MD5 for composite files.`,
}, { }, {
Value: "sha1", Value: "sha1",
Help: `SHA1 for composite files`, Help: `SHA1 for composite files.`,
}, { }, {
Value: "md5all", Value: "md5all",
Help: `MD5 for all files`, Help: `MD5 for all files.`,
}, { }, {
Value: "sha1all", Value: "sha1all",
Help: `SHA1 for all files`, Help: `SHA1 for all files.`,
}, { }, {
Value: "md5quick", Value: "md5quick",
Help: `Copying a file to chunker will request MD5 from the source falling back to SHA1 if unsupported`, Help: `Copying a file to chunker will request MD5 from the source.
Falling back to SHA1 if unsupported.`,
}, { }, {
Value: "sha1quick", Value: "sha1quick",
Help: `Similar to "md5quick" but prefers SHA1 over MD5`, Help: `Similar to "md5quick" but prefers SHA1 over MD5.`,
}}, }},
}, { }, {
Name: "fail_hard", Name: "fail_hard",

View File

@ -83,23 +83,23 @@ func init() {
Name: "level", Name: "level",
Help: `GZIP compression level (-2 to 9). Help: `GZIP compression level (-2 to 9).
Generally -1 (default, equivalent to 5) is recommended. Generally -1 (default, equivalent to 5) is recommended.
Levels 1 to 9 increase compression at the cost of speed. Going past 6 Levels 1 to 9 increase compression at the cost of speed. Going past 6
generally offers very little return. generally offers very little return.
Level -2 uses Huffmann encoding only. Only use if you know what you Level -2 uses Huffmann encoding only. Only use if you know what you
are doing. are doing.
Level 0 turns off compression.`, Level 0 turns off compression.`,
Default: sgzip.DefaultCompression, Default: sgzip.DefaultCompression,
Advanced: true, Advanced: true,
}, { }, {
Name: "ram_cache_limit", Name: "ram_cache_limit",
Help: `Some remotes don't allow the upload of files with unknown size. Help: `Some remotes don't allow the upload of files with unknown size.
In this case the compressed file will need to be cached to determine In this case the compressed file will need to be cached to determine
it's size. it's size.
Files smaller than this limit will be cached in RAM, files larger than Files smaller than this limit will be cached in RAM, files larger than
this limit will be cached on disk.`, this limit will be cached on disk.`,
Default: fs.SizeSuffix(20 * 1024 * 1024), Default: fs.SizeSuffix(20 * 1024 * 1024),
Advanced: true, Advanced: true,
}}, }},

View File

@ -30,7 +30,7 @@ func init() {
CommandHelp: commandHelp, CommandHelp: commandHelp,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "remote", Name: "remote",
Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).", Help: "Remote to encrypt/decrypt.\n\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Required: true, Required: true,
}, { }, {
Name: "filename_encryption", Name: "filename_encryption",
@ -39,13 +39,13 @@ func init() {
Examples: []fs.OptionExample{ Examples: []fs.OptionExample{
{ {
Value: "standard", Value: "standard",
Help: "Encrypt the filenames. See the docs for the details.", Help: "Encrypt the filenames.\nSee the docs for the details.",
}, { }, {
Value: "obfuscate", Value: "obfuscate",
Help: "Very simple filename obfuscation.", Help: "Very simple filename obfuscation.",
}, { }, {
Value: "off", Value: "off",
Help: "Don't encrypt the file names. Adds a \".bin\" extension only.", Help: "Don't encrypt the file names.\nAdds a \".bin\" extension only.",
}, },
}, },
}, { }, {
@ -71,7 +71,7 @@ NB If filename_encryption is "off" then this option will do nothing.`,
Required: true, Required: true,
}, { }, {
Name: "password2", Name: "password2",
Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.", Help: "Password or pass phrase for salt.\n\nOptional but recommended.\nShould be different to the previous password.",
IsPassword: true, IsPassword: true,
}, { }, {
Name: "server_side_across_configs", Name: "server_side_across_configs",

View File

@ -270,7 +270,7 @@ func init() {
}}, }},
}, { }, {
Name: "root_folder_id", Name: "root_folder_id",
Help: `ID of the root folder Help: `ID of the root folder.
Leave blank normally. Leave blank normally.
Fill in to access "Computers" folders (see docs), or for rclone to use Fill in to access "Computers" folders (see docs), or for rclone to use
@ -278,15 +278,15 @@ a non root folder as its starting point.
`, `,
}, { }, {
Name: "service_account_file", Name: "service_account_file",
Help: "Service Account Credentials JSON file path \nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp, Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
}, { }, {
Name: "service_account_credentials", Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.", Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideConfigurator, Hide: fs.OptionHideConfigurator,
Advanced: true, Advanced: true,
}, { }, {
Name: "team_drive", Name: "team_drive",
Help: "ID of the Shared Drive (Team Drive)", Help: "ID of the Shared Drive (Team Drive).",
Hide: fs.OptionHideConfigurator, Hide: fs.OptionHideConfigurator,
Advanced: true, Advanced: true,
}, { }, {
@ -297,12 +297,12 @@ a non root folder as its starting point.
}, { }, {
Name: "use_trash", Name: "use_trash",
Default: true, Default: true,
Help: "Send files to the trash instead of deleting permanently.\nDefaults to true, namely sending files to the trash.\nUse `--drive-use-trash=false` to delete files permanently instead.", Help: "Send files to the trash instead of deleting permanently.\n\nDefaults to true, namely sending files to the trash.\nUse `--drive-use-trash=false` to delete files permanently instead.",
Advanced: true, Advanced: true,
}, { }, {
Name: "skip_gdocs", Name: "skip_gdocs",
Default: false, Default: false,
Help: "Skip google documents in all listings.\nIf given, gdocs practically become invisible to rclone.", Help: "Skip google documents in all listings.\n\nIf given, gdocs practically become invisible to rclone.",
Advanced: true, Advanced: true,
}, { }, {
Name: "skip_checksum_gphotos", Name: "skip_checksum_gphotos",
@ -335,7 +335,7 @@ commands (copy, sync, etc.), and with all other commands too.`,
}, { }, {
Name: "trashed_only", Name: "trashed_only",
Default: false, Default: false,
Help: "Only show files that are in the trash.\nThis will show trashed files in their original directory structure.", Help: "Only show files that are in the trash.\n\nThis will show trashed files in their original directory structure.",
Advanced: true, Advanced: true,
}, { }, {
Name: "starred_only", Name: "starred_only",
@ -345,7 +345,7 @@ commands (copy, sync, etc.), and with all other commands too.`,
}, { }, {
Name: "formats", Name: "formats",
Default: "", Default: "",
Help: "Deprecated: see export_formats", Help: "Deprecated: See export_formats.",
Advanced: true, Advanced: true,
Hide: fs.OptionHideConfigurator, Hide: fs.OptionHideConfigurator,
}, { }, {
@ -361,12 +361,12 @@ commands (copy, sync, etc.), and with all other commands too.`,
}, { }, {
Name: "allow_import_name_change", Name: "allow_import_name_change",
Default: false, Default: false,
Help: "Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.", Help: "Allow the filetype to change when uploading Google docs.\n\nE.g. file.doc to file.docx. This will confuse sync and reupload every time.",
Advanced: true, Advanced: true,
}, { }, {
Name: "use_created_date", Name: "use_created_date",
Default: false, Default: false,
Help: `Use file created date instead of modified date., Help: `Use file created date instead of modified date.
Useful when downloading data and you want the creation date used in Useful when downloading data and you want the creation date used in
place of the last modified date. place of the last modified date.
@ -400,7 +400,7 @@ date is used.`,
}, { }, {
Name: "list_chunk", Name: "list_chunk",
Default: 1000, Default: 1000,
Help: "Size of listing chunk 100-1000. 0 to disable.", Help: "Size of listing chunk 100-1000, 0 to disable.",
Advanced: true, Advanced: true,
}, { }, {
Name: "impersonate", Name: "impersonate",
@ -410,17 +410,19 @@ date is used.`,
}, { }, {
Name: "alternate_export", Name: "alternate_export",
Default: false, Default: false,
Help: "Deprecated: no longer needed", Help: "Deprecated: No longer needed.",
Hide: fs.OptionHideBoth, Hide: fs.OptionHideBoth,
}, { }, {
Name: "upload_cutoff", Name: "upload_cutoff",
Default: defaultChunkSize, Default: defaultChunkSize,
Help: "Cutoff for switching to chunked upload", Help: "Cutoff for switching to chunked upload.",
Advanced: true, Advanced: true,
}, { }, {
Name: "chunk_size", Name: "chunk_size",
Default: defaultChunkSize, Default: defaultChunkSize,
Help: `Upload chunk size. Must a power of 2 >= 256k. Help: `Upload chunk size.
Must a power of 2 >= 256k.
Making this larger will improve performance, but note that each chunk Making this larger will improve performance, but note that each chunk
is buffered in memory one per transfer. is buffered in memory one per transfer.
@ -490,7 +492,7 @@ configurations.`,
}, { }, {
Name: "disable_http2", Name: "disable_http2",
Default: true, Default: true,
Help: `Disable drive using http2 Help: `Disable drive using http2.
There is currently an unsolved issue with the google drive backend and There is currently an unsolved issue with the google drive backend and
HTTP/2. HTTP/2 is therefore disabled by default for the drive backend HTTP/2. HTTP/2 is therefore disabled by default for the drive backend
@ -504,7 +506,7 @@ See: https://github.com/rclone/rclone/issues/3631
}, { }, {
Name: "stop_on_upload_limit", Name: "stop_on_upload_limit",
Default: false, Default: false,
Help: `Make upload limit errors be fatal Help: `Make upload limit errors be fatal.
At the time of writing it is only possible to upload 750 GiB of data to At the time of writing it is only possible to upload 750 GiB of data to
Google Drive a day (this is an undocumented limit). When this limit is Google Drive a day (this is an undocumented limit). When this limit is
@ -521,7 +523,7 @@ See: https://github.com/rclone/rclone/issues/3857
}, { }, {
Name: "stop_on_download_limit", Name: "stop_on_download_limit",
Default: false, Default: false,
Help: `Make download limit errors be fatal Help: `Make download limit errors be fatal.
At the time of writing it is only possible to download 10 TiB of data from At the time of writing it is only possible to download 10 TiB of data from
Google Drive a day (this is an undocumented limit). When this limit is Google Drive a day (this is an undocumented limit). When this limit is
@ -535,7 +537,7 @@ Google don't document so it may break in the future.
Advanced: true, Advanced: true,
}, { }, {
Name: "skip_shortcuts", Name: "skip_shortcuts",
Help: `If set skip shortcut files Help: `If set skip shortcut files.
Normally rclone dereferences shortcut files making them appear as if Normally rclone dereferences shortcut files making them appear as if
they are the original file (see [the shortcuts section](#shortcuts)). they are the original file (see [the shortcuts section](#shortcuts)).

View File

@ -154,7 +154,7 @@ func init() {
}, },
Options: append(oauthutil.SharedOptions, []fs.Option{{ Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "chunk_size", Name: "chunk_size",
Help: fmt.Sprintf(`Upload chunk size. (< %v). Help: fmt.Sprintf(`Upload chunk size (< %v).
Any files larger than this will be uploaded in chunks of this size. Any files larger than this will be uploaded in chunks of this size.
@ -252,7 +252,7 @@ maximise throughput.
Advanced: true, Advanced: true,
}, { }, {
Name: "batch_timeout", Name: "batch_timeout",
Help: `Max time to allow an idle upload batch before uploading Help: `Max time to allow an idle upload batch before uploading.
If an upload batch is idle for more than this long then it will be If an upload batch is idle for more than this long then it will be
uploaded. uploaded.

View File

@ -37,21 +37,21 @@ func init() {
Description: "1Fichier", Description: "1Fichier",
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Help: "Your API Key, get it from https://1fichier.com/console/params.pl", Help: "Your API Key, get it from https://1fichier.com/console/params.pl.",
Name: "api_key", Name: "api_key",
}, { }, {
Help: "If you want to download a shared folder, add this parameter", Help: "If you want to download a shared folder, add this parameter.",
Name: "shared_folder", Name: "shared_folder",
Required: false, Required: false,
Advanced: true, Advanced: true,
}, { }, {
Help: "If you want to download a shared file that is password protected, add this parameter", Help: "If you want to download a shared file that is password protected, add this parameter.",
Name: "file_password", Name: "file_password",
Required: false, Required: false,
Advanced: true, Advanced: true,
IsPassword: true, IsPassword: true,
}, { }, {
Help: "If you want to list the files in a shared folder that is password protected, add this parameter", Help: "If you want to list the files in a shared folder that is password protected, add this parameter.",
Name: "folder_password", Name: "folder_password",
Required: false, Required: false,
Advanced: true, Advanced: true,

View File

@ -65,7 +65,7 @@ func init() {
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "url", Name: "url",
Help: "URL of the Enterprise File Fabric to connect to", Help: "URL of the Enterprise File Fabric to connect to.",
Required: true, Required: true,
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "https://storagemadeeasy.com", Value: "https://storagemadeeasy.com",
@ -79,14 +79,15 @@ func init() {
}}, }},
}, { }, {
Name: "root_folder_id", Name: "root_folder_id",
Help: `ID of the root folder Help: `ID of the root folder.
Leave blank normally. Leave blank normally.
Fill in to make rclone start with directory of a given ID. Fill in to make rclone start with directory of a given ID.
`, `,
}, { }, {
Name: "permanent_token", Name: "permanent_token",
Help: `Permanent Authentication Token Help: `Permanent Authentication Token.
A Permanent Authentication Token can be created in the Enterprise File A Permanent Authentication Token can be created in the Enterprise File
Fabric, on the users Dashboard under Security, there is an entry Fabric, on the users Dashboard under Security, there is an entry
@ -99,7 +100,7 @@ For more info see: https://docs.storagemadeeasy.com/organisationcloud/api-tokens
`, `,
}, { }, {
Name: "token", Name: "token",
Help: `Session Token Help: `Session Token.
This is a session token which rclone caches in the config file. It is This is a session token which rclone caches in the config file. It is
usually valid for 1 hour. usually valid for 1 hour.
@ -109,14 +110,14 @@ Don't set this value - rclone will set it automatically.
Advanced: true, Advanced: true,
}, { }, {
Name: "token_expiry", Name: "token_expiry",
Help: `Token expiry time Help: `Token expiry time.
Don't set this value - rclone will set it automatically. Don't set this value - rclone will set it automatically.
`, `,
Advanced: true, Advanced: true,
}, { }, {
Name: "version", Name: "version",
Help: `Version read from the file fabric Help: `Version read from the file fabric.
Don't set this value - rclone will set it automatically. Don't set this value - rclone will set it automatically.
`, `,

View File

@ -48,7 +48,7 @@ func init() {
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "host", Name: "host",
Help: "FTP host to connect to", Help: "FTP host to connect to.",
Required: true, Required: true,
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "ftp.example.com", Value: "ftp.example.com",
@ -56,18 +56,19 @@ func init() {
}}, }},
}, { }, {
Name: "user", Name: "user",
Help: "FTP username, leave blank for current username, " + currentUser, Help: "FTP username, leave blank for current username, " + currentUser + ".",
}, { }, {
Name: "port", Name: "port",
Help: "FTP port, leave blank to use default (21)", Help: "FTP port, leave blank to use default (21).",
}, { }, {
Name: "pass", Name: "pass",
Help: "FTP password", Help: "FTP password.",
IsPassword: true, IsPassword: true,
Required: true, Required: true,
}, { }, {
Name: "tls", Name: "tls",
Help: `Use Implicit FTPS (FTP over TLS) Help: `Use Implicit FTPS (FTP over TLS).
When using implicit FTP over TLS the client connects using TLS When using implicit FTP over TLS the client connects using TLS
right from the start which breaks compatibility with right from the start which breaks compatibility with
non-TLS-aware servers. This is usually served over port 990 rather non-TLS-aware servers. This is usually served over port 990 rather
@ -75,35 +76,36 @@ than port 21. Cannot be used in combination with explicit FTP.`,
Default: false, Default: false,
}, { }, {
Name: "explicit_tls", Name: "explicit_tls",
Help: `Use Explicit FTPS (FTP over TLS) Help: `Use Explicit FTPS (FTP over TLS).
When using explicit FTP over TLS the client explicitly requests When using explicit FTP over TLS the client explicitly requests
security from the server in order to upgrade a plain text connection security from the server in order to upgrade a plain text connection
to an encrypted one. Cannot be used in combination with implicit FTP.`, to an encrypted one. Cannot be used in combination with implicit FTP.`,
Default: false, Default: false,
}, { }, {
Name: "concurrency", Name: "concurrency",
Help: "Maximum number of FTP simultaneous connections, 0 for unlimited", Help: "Maximum number of FTP simultaneous connections, 0 for unlimited.",
Default: 0, Default: 0,
Advanced: true, Advanced: true,
}, { }, {
Name: "no_check_certificate", Name: "no_check_certificate",
Help: "Do not verify the TLS certificate of the server", Help: "Do not verify the TLS certificate of the server.",
Default: false, Default: false,
Advanced: true, Advanced: true,
}, { }, {
Name: "disable_epsv", Name: "disable_epsv",
Help: "Disable using EPSV even if server advertises support", Help: "Disable using EPSV even if server advertises support.",
Default: false, Default: false,
Advanced: true, Advanced: true,
}, { }, {
Name: "disable_mlsd", Name: "disable_mlsd",
Help: "Disable using MLSD even if server advertises support", Help: "Disable using MLSD even if server advertises support.",
Default: false, Default: false,
Advanced: true, Advanced: true,
}, { }, {
Name: "idle_timeout", Name: "idle_timeout",
Default: fs.Duration(60 * time.Second), Default: fs.Duration(60 * time.Second),
Help: `Max time before closing idle connections Help: `Max time before closing idle connections.
If no connections have been returned to the connection pool in the time If no connections have been returned to the connection pool in the time
given, rclone will empty the connection pool. given, rclone will empty the connection pool.

View File

@ -89,58 +89,58 @@ func init() {
}, },
Options: append(oauthutil.SharedOptions, []fs.Option{{ Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "project_number", Name: "project_number",
Help: "Project number.\nOptional - needed only for list/create/delete buckets - see your developer console.", Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.",
}, { }, {
Name: "service_account_file", Name: "service_account_file",
Help: "Service Account Credentials JSON file path\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp, Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
}, { }, {
Name: "service_account_credentials", Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.", Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideBoth, Hide: fs.OptionHideBoth,
}, { }, {
Name: "anonymous", Name: "anonymous",
Help: "Access public buckets and objects without credentials\nSet to 'true' if you just want to download files and don't configure credentials.", Help: "Access public buckets and objects without credentials.\n\nSet to 'true' if you just want to download files and don't configure credentials.",
Default: false, Default: false,
}, { }, {
Name: "object_acl", Name: "object_acl",
Help: "Access Control List for new objects.", Help: "Access Control List for new objects.",
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "authenticatedRead", Value: "authenticatedRead",
Help: "Object owner gets OWNER access, and all Authenticated Users get READER access.", Help: "Object owner gets OWNER access.\nAll Authenticated Users get READER access.",
}, { }, {
Value: "bucketOwnerFullControl", Value: "bucketOwnerFullControl",
Help: "Object owner gets OWNER access, and project team owners get OWNER access.", Help: "Object owner gets OWNER access.\nProject team owners get OWNER access.",
}, { }, {
Value: "bucketOwnerRead", Value: "bucketOwnerRead",
Help: "Object owner gets OWNER access, and project team owners get READER access.", Help: "Object owner gets OWNER access.\nProject team owners get READER access.",
}, { }, {
Value: "private", Value: "private",
Help: "Object owner gets OWNER access [default if left blank].", Help: "Object owner gets OWNER access.\nDefault if left blank.",
}, { }, {
Value: "projectPrivate", Value: "projectPrivate",
Help: "Object owner gets OWNER access, and project team members get access according to their roles.", Help: "Object owner gets OWNER access.\nProject team members get access according to their roles.",
}, { }, {
Value: "publicRead", Value: "publicRead",
Help: "Object owner gets OWNER access, and all Users get READER access.", Help: "Object owner gets OWNER access.\nAll Users get READER access.",
}}, }},
}, { }, {
Name: "bucket_acl", Name: "bucket_acl",
Help: "Access Control List for new buckets.", Help: "Access Control List for new buckets.",
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "authenticatedRead", Value: "authenticatedRead",
Help: "Project team owners get OWNER access, and all Authenticated Users get READER access.", Help: "Project team owners get OWNER access.\nAll Authenticated Users get READER access.",
}, { }, {
Value: "private", Value: "private",
Help: "Project team owners get OWNER access [default if left blank].", Help: "Project team owners get OWNER access.\nDefault if left blank.",
}, { }, {
Value: "projectPrivate", Value: "projectPrivate",
Help: "Project team members get access according to their roles.", Help: "Project team members get access according to their roles.",
}, { }, {
Value: "publicRead", Value: "publicRead",
Help: "Project team owners get OWNER access, and all Users get READER access.", Help: "Project team owners get OWNER access.\nAll Users get READER access.",
}, { }, {
Value: "publicReadWrite", Value: "publicReadWrite",
Help: "Project team owners get OWNER access, and all Users get WRITER access.", Help: "Project team owners get OWNER access.\nAll Users get WRITER access.",
}}, }},
}, { }, {
Name: "bucket_policy_only", Name: "bucket_policy_only",
@ -163,64 +163,64 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
Help: "Location for the newly created buckets.", Help: "Location for the newly created buckets.",
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "", Value: "",
Help: "Empty for default location (US).", Help: "Empty for default location (US)",
}, { }, {
Value: "asia", Value: "asia",
Help: "Multi-regional location for Asia.", Help: "Multi-regional location for Asia",
}, { }, {
Value: "eu", Value: "eu",
Help: "Multi-regional location for Europe.", Help: "Multi-regional location for Europe",
}, { }, {
Value: "us", Value: "us",
Help: "Multi-regional location for United States.", Help: "Multi-regional location for United States",
}, { }, {
Value: "asia-east1", Value: "asia-east1",
Help: "Taiwan.", Help: "Taiwan",
}, { }, {
Value: "asia-east2", Value: "asia-east2",
Help: "Hong Kong.", Help: "Hong Kong",
}, { }, {
Value: "asia-northeast1", Value: "asia-northeast1",
Help: "Tokyo.", Help: "Tokyo",
}, { }, {
Value: "asia-south1", Value: "asia-south1",
Help: "Mumbai.", Help: "Mumbai",
}, { }, {
Value: "asia-southeast1", Value: "asia-southeast1",
Help: "Singapore.", Help: "Singapore",
}, { }, {
Value: "australia-southeast1", Value: "australia-southeast1",
Help: "Sydney.", Help: "Sydney",
}, { }, {
Value: "europe-north1", Value: "europe-north1",
Help: "Finland.", Help: "Finland",
}, { }, {
Value: "europe-west1", Value: "europe-west1",
Help: "Belgium.", Help: "Belgium",
}, { }, {
Value: "europe-west2", Value: "europe-west2",
Help: "London.", Help: "London",
}, { }, {
Value: "europe-west3", Value: "europe-west3",
Help: "Frankfurt.", Help: "Frankfurt",
}, { }, {
Value: "europe-west4", Value: "europe-west4",
Help: "Netherlands.", Help: "Netherlands",
}, { }, {
Value: "us-central1", Value: "us-central1",
Help: "Iowa.", Help: "Iowa",
}, { }, {
Value: "us-east1", Value: "us-east1",
Help: "South Carolina.", Help: "South Carolina",
}, { }, {
Value: "us-east4", Value: "us-east4",
Help: "Northern Virginia.", Help: "Northern Virginia",
}, { }, {
Value: "us-west1", Value: "us-west1",
Help: "Oregon.", Help: "Oregon",
}, { }, {
Value: "us-west2", Value: "us-west2",
Help: "California.", Help: "California",
}}, }},
}, { }, {
Name: "storage_class", Name: "storage_class",

View File

@ -132,7 +132,7 @@ you want to read the media.`,
}, { }, {
Name: "start_year", Name: "start_year",
Default: 2000, Default: 2000,
Help: `Year limits the photos to be downloaded to those which are uploaded after the given year`, Help: `Year limits the photos to be downloaded to those which are uploaded after the given year.`,
Advanced: true, Advanced: true,
}, { }, {
Name: "include_archived", Name: "include_archived",

View File

@ -19,23 +19,23 @@ func init() {
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "namenode", Name: "namenode",
Help: "hadoop name node and port", Help: "Hadoop name node and port.",
Required: true, Required: true,
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "namenode:8020", Value: "namenode:8020",
Help: "Connect to host namenode at port 8020", Help: "Connect to host namenode at port 8020.",
}}, }},
}, { }, {
Name: "username", Name: "username",
Help: "hadoop user name", Help: "Hadoop user name.",
Required: false, Required: false,
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "root", Value: "root",
Help: "Connect to hdfs as root", Help: "Connect to hdfs as root.",
}}, }},
}, { }, {
Name: "service_principal_name", Name: "service_principal_name",
Help: `Kerberos service principal name for the namenode Help: `Kerberos service principal name for the namenode.
Enables KERBEROS authentication. Specifies the Service Principal Name Enables KERBEROS authentication. Specifies the Service Principal Name
(SERVICE/FQDN) for the namenode.`, (SERVICE/FQDN) for the namenode.`,
@ -47,7 +47,7 @@ Enables KERBEROS authentication. Specifies the Service Principal Name
Advanced: true, Advanced: true,
}, { }, {
Name: "data_transfer_protection", Name: "data_transfer_protection",
Help: `Kerberos data transfer protection: authentication|integrity|privacy Help: `Kerberos data transfer protection: authentication|integrity|privacy.
Specifies whether or not authentication, data signature integrity Specifies whether or not authentication, data signature integrity
checks, and wire encryption is required when communicating the the checks, and wire encryption is required when communicating the the

View File

@ -38,20 +38,20 @@ func init() {
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "url", Name: "url",
Help: "URL of http host to connect to", Help: "URL of http host to connect to.",
Required: true, Required: true,
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "https://example.com", Value: "https://example.com",
Help: "Connect to example.com", Help: "Connect to example.com.",
}, { }, {
Value: "https://user:pass@example.com", Value: "https://user:pass@example.com",
Help: "Connect to example.com using a username and password", Help: "Connect to example.com using a username and password.",
}}, }},
}, { }, {
Name: "headers", Name: "headers",
Help: `Set HTTP headers for all transactions Help: `Set HTTP headers for all transactions.
Use this to set additional HTTP headers for all transactions Use this to set additional HTTP headers for all transactions.
The input format is comma separated list of key,value pairs. Standard The input format is comma separated list of key,value pairs. Standard
[CSV encoding](https://godoc.org/encoding/csv) may be used. [CSV encoding](https://godoc.org/encoding/csv) may be used.
@ -64,7 +64,7 @@ You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'
Advanced: true, Advanced: true,
}, { }, {
Name: "no_slash", Name: "no_slash",
Help: `Set this if the site doesn't end directories with / Help: `Set this if the site doesn't end directories with /.
Use this if your target website does not use / on the end of Use this if your target website does not use / on the end of
directories. directories.
@ -80,7 +80,7 @@ directories.`,
Advanced: true, Advanced: true,
}, { }, {
Name: "no_head", Name: "no_head",
Help: `Don't use HEAD requests to find file sizes in dir listing Help: `Don't use HEAD requests to find file sizes in dir listing.
If your site is being very slow to load then you can try this option. If your site is being very slow to load then you can try this option.
Normally rclone does a HEAD request for each potential file in a Normally rclone does a HEAD request for each potential file in a

View File

@ -86,7 +86,7 @@ func init() {
Advanced: true, Advanced: true,
}, { }, {
Name: "trashed_only", Name: "trashed_only",
Help: "Only show files that are in the trash.\nThis will show trashed files in their original directory structure.", Help: "Only show files that are in the trash.\n\nThis will show trashed files in their original directory structure.",
Default: false, Default: false,
Advanced: true, Advanced: true,
}, { }, {
@ -122,15 +122,15 @@ func init() {
func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) { func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
switch config.State { switch config.State {
case "": case "":
return fs.ConfigChooseFixed("auth_type_done", "config_type", `Authentication type`, []fs.OptionExample{{ return fs.ConfigChooseFixed("auth_type_done", "config_type", `Authentication type.`, []fs.OptionExample{{
Value: "standard", Value: "standard",
Help: "Standard authentication - use this if you're a normal Jottacloud user.", Help: "Standard authentication.\nUse this if you're a normal Jottacloud user.",
}, { }, {
Value: "legacy", Value: "legacy",
Help: "Legacy authentication - this is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.", Help: "Legacy authentication.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.",
}, { }, {
Value: "telia", Value: "telia",
Help: "Telia Cloud authentication - use this if you are using Telia Cloud.", Help: "Telia Cloud authentication.\nUse this if you are using Telia Cloud.",
}}) }})
case "auth_type_done": case "auth_type_done":
// Jump to next state according to config chosen // Jump to next state according to config chosen

View File

@ -32,29 +32,29 @@ func init() {
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "endpoint", Name: "endpoint",
Help: "The Koofr API endpoint to use", Help: "The Koofr API endpoint to use.",
Default: "https://app.koofr.net", Default: "https://app.koofr.net",
Required: true, Required: true,
Advanced: true, Advanced: true,
}, { }, {
Name: "mountid", Name: "mountid",
Help: "Mount ID of the mount to use. If omitted, the primary mount is used.", Help: "Mount ID of the mount to use.\n\nIf omitted, the primary mount is used.",
Required: false, Required: false,
Default: "", Default: "",
Advanced: true, Advanced: true,
}, { }, {
Name: "setmtime", Name: "setmtime",
Help: "Does the backend support setting modification time. Set this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend.", Help: "Does the backend support setting modification time.\n\nSet this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend.",
Default: true, Default: true,
Required: true, Required: true,
Advanced: true, Advanced: true,
}, { }, {
Name: "user", Name: "user",
Help: "Your Koofr user name", Help: "Your Koofr user name.",
Required: true, Required: true,
}, { }, {
Name: "password", Name: "password",
Help: "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password)", Help: "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",
IsPassword: true, IsPassword: true,
Required: true, Required: true,
}, { }, {

View File

@ -44,11 +44,11 @@ func init() {
CommandHelp: commandHelp, CommandHelp: commandHelp,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "nounc", Name: "nounc",
Help: "Disable UNC (long path names) conversion on Windows", Help: "Disable UNC (long path names) conversion on Windows.",
Advanced: runtime.GOOS != "windows", Advanced: runtime.GOOS != "windows",
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "true", Value: "true",
Help: "Disables long file names", Help: "Disables long file names.",
}}, }},
}, { }, {
Name: "copy_links", Name: "copy_links",
@ -59,7 +59,7 @@ func init() {
Advanced: true, Advanced: true,
}, { }, {
Name: "links", Name: "links",
Help: "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension", Help: "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension.",
Default: false, Default: false,
NoPrefix: true, NoPrefix: true,
ShortOpt: "l", ShortOpt: "l",
@ -67,6 +67,7 @@ func init() {
}, { }, {
Name: "skip_links", Name: "skip_links",
Help: `Don't warn about skipped symlinks. Help: `Don't warn about skipped symlinks.
This flag disables warning messages on skipped symlinks or junction This flag disables warning messages on skipped symlinks or junction
points, as you explicitly acknowledge that they should be skipped.`, points, as you explicitly acknowledge that they should be skipped.`,
Default: false, Default: false,
@ -74,21 +75,21 @@ points, as you explicitly acknowledge that they should be skipped.`,
Advanced: true, Advanced: true,
}, { }, {
Name: "zero_size_links", Name: "zero_size_links",
Help: `Assume the Stat size of links is zero (and read them instead) (Deprecated) Help: `Assume the Stat size of links is zero (and read them instead) (deprecated).
Rclone used to use the Stat size of links as the link size, but this fails in quite a few places Rclone used to use the Stat size of links as the link size, but this fails in quite a few places:
- Windows - Windows
- On some virtual filesystems (such ash LucidLink) - On some virtual filesystems (such ash LucidLink)
- Android - Android
So rclone now always reads the link So rclone now always reads the link.
`, `,
Default: false, Default: false,
Advanced: true, Advanced: true,
}, { }, {
Name: "unicode_normalization", Name: "unicode_normalization",
Help: `Apply unicode NFC normalization to paths and filenames Help: `Apply unicode NFC normalization to paths and filenames.
This flag can be used to normalize file names into unicode NFC form This flag can be used to normalize file names into unicode NFC form
that are read from the local filesystem. that are read from the local filesystem.
@ -106,7 +107,7 @@ routine so this flag shouldn't normally be used.`,
Advanced: true, Advanced: true,
}, { }, {
Name: "no_check_updated", Name: "no_check_updated",
Help: `Don't check to see if the files change during upload Help: `Don't check to see if the files change during upload.
Normally rclone checks the size and modification time of files as they Normally rclone checks the size and modification time of files as they
are being uploaded and aborts with a message which starts "can't copy are being uploaded and aborts with a message which starts "can't copy
@ -152,7 +153,7 @@ to override the default choice.`,
Advanced: true, Advanced: true,
}, { }, {
Name: "case_insensitive", Name: "case_insensitive",
Help: `Force the filesystem to report itself as case insensitive Help: `Force the filesystem to report itself as case insensitive.
Normally the local backend declares itself as case insensitive on Normally the local backend declares itself as case insensitive on
Windows/macOS and case sensitive for everything else. Use this flag Windows/macOS and case sensitive for everything else. Use this flag
@ -161,7 +162,7 @@ to override the default choice.`,
Advanced: true, Advanced: true,
}, { }, {
Name: "no_preallocate", Name: "no_preallocate",
Help: `Disable preallocation of disk space for transferred files Help: `Disable preallocation of disk space for transferred files.
Preallocation of disk space helps prevent filesystem fragmentation. Preallocation of disk space helps prevent filesystem fragmentation.
However, some virtual filesystem layers (such as Google Drive File However, some virtual filesystem layers (such as Google Drive File
@ -172,7 +173,7 @@ Use this flag to disable preallocation.`,
Advanced: true, Advanced: true,
}, { }, {
Name: "no_sparse", Name: "no_sparse",
Help: `Disable sparse files for multi-thread downloads Help: `Disable sparse files for multi-thread downloads.
On Windows platforms rclone will make sparse files when doing On Windows platforms rclone will make sparse files when doing
multi-thread downloads. This avoids long pauses on large files where multi-thread downloads. This avoids long pauses on large files where
@ -182,7 +183,7 @@ cause disk fragmentation and can be slow to work with.`,
Advanced: true, Advanced: true,
}, { }, {
Name: "no_set_modtime", Name: "no_set_modtime",
Help: `Disable setting modtime Help: `Disable setting modtime.
Normally rclone updates modification time of files after they are done Normally rclone updates modification time of files after they are done
uploading. This can cause permissions issues on Linux platforms when uploading. This can cause permissions issues on Linux platforms when

View File

@ -87,11 +87,11 @@ func init() {
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "user", Name: "user",
Help: "User name (usually email)", Help: "User name (usually email).",
Required: true, Required: true,
}, { }, {
Name: "pass", Name: "pass",
Help: "Password", Help: "Password.",
Required: true, Required: true,
IsPassword: true, IsPassword: true,
}, { }, {
@ -99,6 +99,7 @@ func init() {
Default: true, Default: true,
Advanced: false, Advanced: false,
Help: `Skip full upload if there is another file with same data hash. Help: `Skip full upload if there is another file with same data hash.
This feature is called "speedup" or "put by hash". It is especially efficient This feature is called "speedup" or "put by hash". It is especially efficient
in case of generally available files like popular books, video or audio clips, in case of generally available files like popular books, video or audio clips,
because files are searched by hash in all accounts of all mailru users. because files are searched by hash in all accounts of all mailru users.
@ -119,6 +120,7 @@ streaming or partial uploads), it will not even try this optimization.`,
Default: "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf", Default: "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf",
Advanced: true, Advanced: true,
Help: `Comma separated list of file name patterns eligible for speedup (put by hash). Help: `Comma separated list of file name patterns eligible for speedup (put by hash).
Patterns are case insensitive and can contain '*' or '?' meta characters.`, Patterns are case insensitive and can contain '*' or '?' meta characters.`,
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "", Value: "",
@ -137,8 +139,9 @@ Patterns are case insensitive and can contain '*' or '?' meta characters.`,
Name: "speedup_max_disk", Name: "speedup_max_disk",
Default: fs.SizeSuffix(3 * 1024 * 1024 * 1024), Default: fs.SizeSuffix(3 * 1024 * 1024 * 1024),
Advanced: true, Advanced: true,
Help: `This option allows you to disable speedup (put by hash) for large files Help: `This option allows you to disable speedup (put by hash) for large files.
(because preliminary hashing can exhaust you RAM or disk space)`,
Reason is that preliminary hashing can exhaust your RAM or disk space.`,
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "0", Value: "0",
Help: "Completely disable speedup (put by hash).", Help: "Completely disable speedup (put by hash).",
@ -168,7 +171,7 @@ Patterns are case insensitive and can contain '*' or '?' meta characters.`,
Name: "check_hash", Name: "check_hash",
Default: true, Default: true,
Advanced: true, Advanced: true,
Help: "What should copy do if file checksum is mismatched or invalid", Help: "What should copy do if file checksum is mismatched or invalid.",
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "true", Value: "true",
Help: "Fail with error.", Help: "Fail with error.",
@ -182,6 +185,7 @@ Patterns are case insensitive and can contain '*' or '?' meta characters.`,
Advanced: true, Advanced: true,
Hide: fs.OptionHideBoth, Hide: fs.OptionHideBoth,
Help: `HTTP user agent used internally by client. Help: `HTTP user agent used internally by client.
Defaults to "rclone/VERSION" or "--user-agent" provided on command line.`, Defaults to "rclone/VERSION" or "--user-agent" provided on command line.`,
}, { }, {
Name: "quirks", Name: "quirks",
@ -189,6 +193,7 @@ Defaults to "rclone/VERSION" or "--user-agent" provided on command line.`,
Advanced: true, Advanced: true,
Hide: fs.OptionHideBoth, Hide: fs.OptionHideBoth,
Help: `Comma separated list of internal maintenance flags. Help: `Comma separated list of internal maintenance flags.
This option must not be used by an ordinary user. It is intended only to This option must not be used by an ordinary user. It is intended only to
facilitate remote troubleshooting of backend issues. Strict meaning of facilitate remote troubleshooting of backend issues. Strict meaning of
flags is not documented and not guaranteed to persist between releases. flags is not documented and not guaranteed to persist between releases.

View File

@ -59,7 +59,7 @@ func init() {
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "user", Name: "user",
Help: "User name", Help: "User name.",
Required: true, Required: true,
}, { }, {
Name: "pass", Name: "pass",

View File

@ -129,12 +129,12 @@ Note that the chunks will be buffered into memory.`,
Advanced: true, Advanced: true,
}, { }, {
Name: "drive_id", Name: "drive_id",
Help: "The ID of the drive to use", Help: "The ID of the drive to use.",
Default: "", Default: "",
Advanced: true, Advanced: true,
}, { }, {
Name: "drive_type", Name: "drive_type",
Help: "The type of the drive ( " + driveTypePersonal + " | " + driveTypeBusiness + " | " + driveTypeSharepoint + " )", Help: "The type of the drive (" + driveTypePersonal + " | " + driveTypeBusiness + " | " + driveTypeSharepoint + ").",
Default: "", Default: "",
Advanced: true, Advanced: true,
}, { }, {
@ -165,7 +165,7 @@ fall back to normal copy (which will be slightly slower).`,
}, { }, {
Name: "no_versions", Name: "no_versions",
Default: false, Default: false,
Help: `Remove all versions on modifying operations Help: `Remove all versions on modifying operations.
Onedrive for business creates versions when rclone uploads new files Onedrive for business creates versions when rclone uploads new files
overwriting an existing one and when it sets the modification time. overwriting an existing one and when it sets the modification time.
@ -186,10 +186,10 @@ this flag there.
Advanced: true, Advanced: true,
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "anonymous", Value: "anonymous",
Help: "Anyone with the link has access, without needing to sign in. This may include people outside of your organization. Anonymous link support may be disabled by an administrator.", Help: "Anyone with the link has access, without needing to sign in.\nThis may include people outside of your organization.\nAnonymous link support may be disabled by an administrator.",
}, { }, {
Value: "organization", Value: "organization",
Help: "Anyone signed into your organization (tenant) can use the link to get access. Only available in OneDrive for Business and SharePoint.", Help: "Anyone signed into your organization (tenant) can use the link to get access.\nOnly available in OneDrive for Business and SharePoint.",
}}, }},
}, { }, {
Name: "link_type", Name: "link_type",
@ -399,7 +399,7 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
Help: "Root Sharepoint site", Help: "Root Sharepoint site",
}, { }, {
Value: "url", Value: "url",
Help: "Sharepoint site name or URL (e.g. mysite or https://contoso.sharepoint.com/sites/mysite)", Help: "Sharepoint site name or URL\nE.g. mysite or https://contoso.sharepoint.com/sites/mysite",
}, { }, {
Value: "search", Value: "search",
Help: "Search for a Sharepoint site", Help: "Search for a Sharepoint site",
@ -411,7 +411,7 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
Help: "Type in SiteID (advanced)", Help: "Type in SiteID (advanced)",
}, { }, {
Value: "path", Value: "path",
Help: "Sharepoint server-relative path (advanced, e.g. /teams/hr)", Help: "Sharepoint server-relative path (advanced)\nE.g. /teams/hr",
}}) }})
case "choose_type_done": case "choose_type_done":
// Jump to next state according to config chosen // Jump to next state according to config chosen

View File

@ -42,7 +42,7 @@ func init() {
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "username", Name: "username",
Help: "Username", Help: "Username.",
Required: true, Required: true,
}, { }, {
Name: "password", Name: "password",

View File

@ -40,36 +40,36 @@ func init() {
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "env_auth", Name: "env_auth",
Help: "Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.", Help: "Get QingStor credentials from runtime.\n\nOnly applies if access_key_id and secret_access_key is blank.",
Default: false, Default: false,
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "false", Value: "false",
Help: "Enter QingStor credentials in the next step", Help: "Enter QingStor credentials in the next step.",
}, { }, {
Value: "true", Value: "true",
Help: "Get QingStor credentials from the environment (env vars or IAM)", Help: "Get QingStor credentials from the environment (env vars or IAM).",
}}, }},
}, { }, {
Name: "access_key_id", Name: "access_key_id",
Help: "QingStor Access Key ID\nLeave blank for anonymous access or runtime credentials.", Help: "QingStor Access Key ID.\n\nLeave blank for anonymous access or runtime credentials.",
}, { }, {
Name: "secret_access_key", Name: "secret_access_key",
Help: "QingStor Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials.", Help: "QingStor Secret Access Key (password).\n\nLeave blank for anonymous access or runtime credentials.",
}, { }, {
Name: "endpoint", Name: "endpoint",
Help: "Enter an endpoint URL to connection QingStor API.\nLeave blank will use the default value \"https://qingstor.com:443\"", Help: "Enter an endpoint URL to connection QingStor API.\n\nLeave blank will use the default value \"https://qingstor.com:443\".",
}, { }, {
Name: "zone", Name: "zone",
Help: "Zone to connect to.\nDefault is \"pek3a\".", Help: "Zone to connect to.\n\nDefault is \"pek3a\".",
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "pek3a", Value: "pek3a",
Help: "The Beijing (China) Three Zone\nNeeds location constraint pek3a.", Help: "The Beijing (China) Three Zone.\nNeeds location constraint pek3a.",
}, { }, {
Value: "sh1a", Value: "sh1a",
Help: "The Shanghai (China) First Zone\nNeeds location constraint sh1a.", Help: "The Shanghai (China) First Zone.\nNeeds location constraint sh1a.",
}, { }, {
Value: "gd2a", Value: "gd2a",
Help: "The Guangdong (China) Second Zone\nNeeds location constraint gd2a.", Help: "The Guangdong (China) Second Zone.\nNeeds location constraint gd2a.",
}}, }},
}, { }, {
Name: "connection_retries", Name: "connection_retries",
@ -78,7 +78,7 @@ func init() {
Advanced: true, Advanced: true,
}, { }, {
Name: "upload_cutoff", Name: "upload_cutoff",
Help: `Cutoff for switching to chunked upload Help: `Cutoff for switching to chunked upload.
Any files larger than this will be uploaded in chunks of chunk_size. Any files larger than this will be uploaded in chunks of chunk_size.
The minimum is 0 and the maximum is 5 GiB.`, The minimum is 0 and the maximum is 5 GiB.`,

View File

@ -109,21 +109,21 @@ func init() {
}}, }},
}, { }, {
Name: "env_auth", Name: "env_auth",
Help: "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).\nOnly applies if access_key_id and secret_access_key is blank.", Help: "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).\n\nOnly applies if access_key_id and secret_access_key is blank.",
Default: false, Default: false,
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "false", Value: "false",
Help: "Enter AWS credentials in the next step", Help: "Enter AWS credentials in the next step.",
}, { }, {
Value: "true", Value: "true",
Help: "Get AWS credentials from the environment (env vars or IAM)", Help: "Get AWS credentials from the environment (env vars or IAM).",
}}, }},
}, { }, {
Name: "access_key_id", Name: "access_key_id",
Help: "AWS Access Key ID.\nLeave blank for anonymous access or runtime credentials.", Help: "AWS Access Key ID.\n\nLeave blank for anonymous access or runtime credentials.",
}, { }, {
Name: "secret_access_key", Name: "secret_access_key",
Help: "AWS Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials.", Help: "AWS Secret Access Key (password).\n\nLeave blank for anonymous access or runtime credentials.",
}, { }, {
// References: // References:
// 1. https://docs.aws.amazon.com/general/latest/gr/rande.html // 1. https://docs.aws.amazon.com/general/latest/gr/rande.html
@ -136,76 +136,76 @@ func init() {
Help: "The default endpoint - a good choice if you are unsure.\nUS Region, Northern Virginia, or Pacific Northwest.\nLeave location constraint empty.", Help: "The default endpoint - a good choice if you are unsure.\nUS Region, Northern Virginia, or Pacific Northwest.\nLeave location constraint empty.",
}, { }, {
Value: "us-east-2", Value: "us-east-2",
Help: "US East (Ohio) Region\nNeeds location constraint us-east-2.", Help: "US East (Ohio) Region.\nNeeds location constraint us-east-2.",
}, { }, {
Value: "us-west-1", Value: "us-west-1",
Help: "US West (Northern California) Region\nNeeds location constraint us-west-1.", Help: "US West (Northern California) Region.\nNeeds location constraint us-west-1.",
}, { }, {
Value: "us-west-2", Value: "us-west-2",
Help: "US West (Oregon) Region\nNeeds location constraint us-west-2.", Help: "US West (Oregon) Region.\nNeeds location constraint us-west-2.",
}, { }, {
Value: "ca-central-1", Value: "ca-central-1",
Help: "Canada (Central) Region\nNeeds location constraint ca-central-1.", Help: "Canada (Central) Region.\nNeeds location constraint ca-central-1.",
}, { }, {
Value: "eu-west-1", Value: "eu-west-1",
Help: "EU (Ireland) Region\nNeeds location constraint EU or eu-west-1.", Help: "EU (Ireland) Region.\nNeeds location constraint EU or eu-west-1.",
}, { }, {
Value: "eu-west-2", Value: "eu-west-2",
Help: "EU (London) Region\nNeeds location constraint eu-west-2.", Help: "EU (London) Region.\nNeeds location constraint eu-west-2.",
}, { }, {
Value: "eu-west-3", Value: "eu-west-3",
Help: "EU (Paris) Region\nNeeds location constraint eu-west-3.", Help: "EU (Paris) Region.\nNeeds location constraint eu-west-3.",
}, { }, {
Value: "eu-north-1", Value: "eu-north-1",
Help: "EU (Stockholm) Region\nNeeds location constraint eu-north-1.", Help: "EU (Stockholm) Region.\nNeeds location constraint eu-north-1.",
}, { }, {
Value: "eu-south-1", Value: "eu-south-1",
Help: "EU (Milan) Region\nNeeds location constraint eu-south-1.", Help: "EU (Milan) Region.\nNeeds location constraint eu-south-1.",
}, { }, {
Value: "eu-central-1", Value: "eu-central-1",
Help: "EU (Frankfurt) Region\nNeeds location constraint eu-central-1.", Help: "EU (Frankfurt) Region.\nNeeds location constraint eu-central-1.",
}, { }, {
Value: "ap-southeast-1", Value: "ap-southeast-1",
Help: "Asia Pacific (Singapore) Region\nNeeds location constraint ap-southeast-1.", Help: "Asia Pacific (Singapore) Region.\nNeeds location constraint ap-southeast-1.",
}, { }, {
Value: "ap-southeast-2", Value: "ap-southeast-2",
Help: "Asia Pacific (Sydney) Region\nNeeds location constraint ap-southeast-2.", Help: "Asia Pacific (Sydney) Region.\nNeeds location constraint ap-southeast-2.",
}, { }, {
Value: "ap-northeast-1", Value: "ap-northeast-1",
Help: "Asia Pacific (Tokyo) Region\nNeeds location constraint ap-northeast-1.", Help: "Asia Pacific (Tokyo) Region.\nNeeds location constraint ap-northeast-1.",
}, { }, {
Value: "ap-northeast-2", Value: "ap-northeast-2",
Help: "Asia Pacific (Seoul)\nNeeds location constraint ap-northeast-2.", Help: "Asia Pacific (Seoul).\nNeeds location constraint ap-northeast-2.",
}, { }, {
Value: "ap-northeast-3", Value: "ap-northeast-3",
Help: "Asia Pacific (Osaka-Local)\nNeeds location constraint ap-northeast-3.", Help: "Asia Pacific (Osaka-Local).\nNeeds location constraint ap-northeast-3.",
}, { }, {
Value: "ap-south-1", Value: "ap-south-1",
Help: "Asia Pacific (Mumbai)\nNeeds location constraint ap-south-1.", Help: "Asia Pacific (Mumbai).\nNeeds location constraint ap-south-1.",
}, { }, {
Value: "ap-east-1", Value: "ap-east-1",
Help: "Asia Pacific (Hong Kong) Region\nNeeds location constraint ap-east-1.", Help: "Asia Pacific (Hong Kong) Region.\nNeeds location constraint ap-east-1.",
}, { }, {
Value: "sa-east-1", Value: "sa-east-1",
Help: "South America (Sao Paulo) Region\nNeeds location constraint sa-east-1.", Help: "South America (Sao Paulo) Region.\nNeeds location constraint sa-east-1.",
}, { }, {
Value: "me-south-1", Value: "me-south-1",
Help: "Middle East (Bahrain) Region\nNeeds location constraint me-south-1.", Help: "Middle East (Bahrain) Region.\nNeeds location constraint me-south-1.",
}, { }, {
Value: "af-south-1", Value: "af-south-1",
Help: "Africa (Cape Town) Region\nNeeds location constraint af-south-1.", Help: "Africa (Cape Town) Region.\nNeeds location constraint af-south-1.",
}, { }, {
Value: "cn-north-1", Value: "cn-north-1",
Help: "China (Beijing) Region\nNeeds location constraint cn-north-1.", Help: "China (Beijing) Region.\nNeeds location constraint cn-north-1.",
}, { }, {
Value: "cn-northwest-1", Value: "cn-northwest-1",
Help: "China (Ningxia) Region\nNeeds location constraint cn-northwest-1.", Help: "China (Ningxia) Region.\nNeeds location constraint cn-northwest-1.",
}, { }, {
Value: "us-gov-east-1", Value: "us-gov-east-1",
Help: "AWS GovCloud (US-East) Region\nNeeds location constraint us-gov-east-1.", Help: "AWS GovCloud (US-East) Region.\nNeeds location constraint us-gov-east-1.",
}, { }, {
Value: "us-gov-west-1", Value: "us-gov-west-1",
Help: "AWS GovCloud (US) Region\nNeeds location constraint us-gov-west-1.", Help: "AWS GovCloud (US) Region.\nNeeds location constraint us-gov-west-1.",
}}, }},
}, { }, {
Name: "region", Name: "region",
@ -220,22 +220,22 @@ func init() {
}}, }},
}, { }, {
Name: "region", Name: "region",
Help: "Region to connect to.\nLeave blank if you are using an S3 clone and you don't have a region.", Help: "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.",
Provider: "!AWS,Alibaba,Scaleway,TencentCOS", Provider: "!AWS,Alibaba,Scaleway,TencentCOS",
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "", Value: "",
Help: "Use this if unsure. Will use v4 signatures and an empty region.", Help: "Use this if unsure.\nWill use v4 signatures and an empty region.",
}, { }, {
Value: "other-v2-signature", Value: "other-v2-signature",
Help: "Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH.", Help: "Use this only if v4 signatures don't work.\nE.g. pre Jewel/v10 CEPH.",
}}, }},
}, { }, {
Name: "endpoint", Name: "endpoint",
Help: "Endpoint for S3 API.\nLeave blank if using AWS to use the default endpoint for the region.", Help: "Endpoint for S3 API.\n\nLeave blank if using AWS to use the default endpoint for the region.",
Provider: "AWS", Provider: "AWS",
}, { }, {
Name: "endpoint", Name: "endpoint",
Help: "Endpoint for IBM COS S3 API.\nSpecify if using an IBM COS On Premise.", Help: "Endpoint for IBM COS S3 API.\n\nSpecify if using an IBM COS On Premise.",
Provider: "IBMCOS", Provider: "IBMCOS",
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "s3.us.cloud-object-storage.appdomain.cloud", Value: "s3.us.cloud-object-storage.appdomain.cloud",
@ -537,65 +537,65 @@ func init() {
Provider: "TencentCOS", Provider: "TencentCOS",
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "cos.ap-beijing.myqcloud.com", Value: "cos.ap-beijing.myqcloud.com",
Help: "Beijing Region.", Help: "Beijing Region",
}, { }, {
Value: "cos.ap-nanjing.myqcloud.com", Value: "cos.ap-nanjing.myqcloud.com",
Help: "Nanjing Region.", Help: "Nanjing Region",
}, { }, {
Value: "cos.ap-shanghai.myqcloud.com", Value: "cos.ap-shanghai.myqcloud.com",
Help: "Shanghai Region.", Help: "Shanghai Region",
}, { }, {
Value: "cos.ap-guangzhou.myqcloud.com", Value: "cos.ap-guangzhou.myqcloud.com",
Help: "Guangzhou Region.", Help: "Guangzhou Region",
}, { }, {
Value: "cos.ap-nanjing.myqcloud.com", Value: "cos.ap-nanjing.myqcloud.com",
Help: "Nanjing Region.", Help: "Nanjing Region",
}, { }, {
Value: "cos.ap-chengdu.myqcloud.com", Value: "cos.ap-chengdu.myqcloud.com",
Help: "Chengdu Region.", Help: "Chengdu Region",
}, { }, {
Value: "cos.ap-chongqing.myqcloud.com", Value: "cos.ap-chongqing.myqcloud.com",
Help: "Chongqing Region.", Help: "Chongqing Region",
}, { }, {
Value: "cos.ap-hongkong.myqcloud.com", Value: "cos.ap-hongkong.myqcloud.com",
Help: "Hong Kong (China) Region.", Help: "Hong Kong (China) Region",
}, { }, {
Value: "cos.ap-singapore.myqcloud.com", Value: "cos.ap-singapore.myqcloud.com",
Help: "Singapore Region.", Help: "Singapore Region",
}, { }, {
Value: "cos.ap-mumbai.myqcloud.com", Value: "cos.ap-mumbai.myqcloud.com",
Help: "Mumbai Region.", Help: "Mumbai Region",
}, { }, {
Value: "cos.ap-seoul.myqcloud.com", Value: "cos.ap-seoul.myqcloud.com",
Help: "Seoul Region.", Help: "Seoul Region",
}, { }, {
Value: "cos.ap-bangkok.myqcloud.com", Value: "cos.ap-bangkok.myqcloud.com",
Help: "Bangkok Region.", Help: "Bangkok Region",
}, { }, {
Value: "cos.ap-tokyo.myqcloud.com", Value: "cos.ap-tokyo.myqcloud.com",
Help: "Tokyo Region.", Help: "Tokyo Region",
}, { }, {
Value: "cos.na-siliconvalley.myqcloud.com", Value: "cos.na-siliconvalley.myqcloud.com",
Help: "Silicon Valley Region.", Help: "Silicon Valley Region",
}, { }, {
Value: "cos.na-ashburn.myqcloud.com", Value: "cos.na-ashburn.myqcloud.com",
Help: "Virginia Region.", Help: "Virginia Region",
}, { }, {
Value: "cos.na-toronto.myqcloud.com", Value: "cos.na-toronto.myqcloud.com",
Help: "Toronto Region.", Help: "Toronto Region",
}, { }, {
Value: "cos.eu-frankfurt.myqcloud.com", Value: "cos.eu-frankfurt.myqcloud.com",
Help: "Frankfurt Region.", Help: "Frankfurt Region",
}, { }, {
Value: "cos.eu-moscow.myqcloud.com", Value: "cos.eu-moscow.myqcloud.com",
Help: "Moscow Region.", Help: "Moscow Region",
}, { }, {
Value: "cos.accelerate.myqcloud.com", Value: "cos.accelerate.myqcloud.com",
Help: "Use Tencent COS Accelerate Endpoint.", Help: "Use Tencent COS Accelerate Endpoint",
}}, }},
}, { }, {
Name: "endpoint", Name: "endpoint",
Help: "Endpoint for S3 API.\nRequired when using an S3 clone.", Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
Provider: "!AWS,IBMCOS,TencentCOS,Alibaba,Scaleway,StackPath", Provider: "!AWS,IBMCOS,TencentCOS,Alibaba,Scaleway,StackPath",
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "objects-us-east-1.dream.io", Value: "objects-us-east-1.dream.io",
@ -636,87 +636,87 @@ func init() {
}}, }},
}, { }, {
Name: "location_constraint", Name: "location_constraint",
Help: "Location constraint - must be set to match the Region.\nUsed when creating buckets only.", Help: "Location constraint - must be set to match the Region.\n\nUsed when creating buckets only.",
Provider: "AWS", Provider: "AWS",
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "", Value: "",
Help: "Empty for US Region, Northern Virginia, or Pacific Northwest.", Help: "Empty for US Region, Northern Virginia, or Pacific Northwest",
}, { }, {
Value: "us-east-2", Value: "us-east-2",
Help: "US East (Ohio) Region.", Help: "US East (Ohio) Region",
}, { }, {
Value: "us-west-1", Value: "us-west-1",
Help: "US West (Northern California) Region.", Help: "US West (Northern California) Region",
}, { }, {
Value: "us-west-2", Value: "us-west-2",
Help: "US West (Oregon) Region.", Help: "US West (Oregon) Region",
}, { }, {
Value: "ca-central-1", Value: "ca-central-1",
Help: "Canada (Central) Region.", Help: "Canada (Central) Region",
}, { }, {
Value: "eu-west-1", Value: "eu-west-1",
Help: "EU (Ireland) Region.", Help: "EU (Ireland) Region",
}, { }, {
Value: "eu-west-2", Value: "eu-west-2",
Help: "EU (London) Region.", Help: "EU (London) Region",
}, { }, {
Value: "eu-west-3", Value: "eu-west-3",
Help: "EU (Paris) Region.", Help: "EU (Paris) Region",
}, { }, {
Value: "eu-north-1", Value: "eu-north-1",
Help: "EU (Stockholm) Region.", Help: "EU (Stockholm) Region",
}, { }, {
Value: "eu-south-1", Value: "eu-south-1",
Help: "EU (Milan) Region.", Help: "EU (Milan) Region",
}, { }, {
Value: "EU", Value: "EU",
Help: "EU Region.", Help: "EU Region",
}, { }, {
Value: "ap-southeast-1", Value: "ap-southeast-1",
Help: "Asia Pacific (Singapore) Region.", Help: "Asia Pacific (Singapore) Region",
}, { }, {
Value: "ap-southeast-2", Value: "ap-southeast-2",
Help: "Asia Pacific (Sydney) Region.", Help: "Asia Pacific (Sydney) Region",
}, { }, {
Value: "ap-northeast-1", Value: "ap-northeast-1",
Help: "Asia Pacific (Tokyo) Region.", Help: "Asia Pacific (Tokyo) Region",
}, { }, {
Value: "ap-northeast-2", Value: "ap-northeast-2",
Help: "Asia Pacific (Seoul) Region.", Help: "Asia Pacific (Seoul) Region",
}, { }, {
Value: "ap-northeast-3", Value: "ap-northeast-3",
Help: "Asia Pacific (Osaka-Local) Region.", Help: "Asia Pacific (Osaka-Local) Region",
}, { }, {
Value: "ap-south-1", Value: "ap-south-1",
Help: "Asia Pacific (Mumbai) Region.", Help: "Asia Pacific (Mumbai) Region",
}, { }, {
Value: "ap-east-1", Value: "ap-east-1",
Help: "Asia Pacific (Hong Kong) Region.", Help: "Asia Pacific (Hong Kong) Region",
}, { }, {
Value: "sa-east-1", Value: "sa-east-1",
Help: "South America (Sao Paulo) Region.", Help: "South America (Sao Paulo) Region",
}, { }, {
Value: "me-south-1", Value: "me-south-1",
Help: "Middle East (Bahrain) Region.", Help: "Middle East (Bahrain) Region",
}, { }, {
Value: "af-south-1", Value: "af-south-1",
Help: "Africa (Cape Town) Region.", Help: "Africa (Cape Town) Region",
}, { }, {
Value: "cn-north-1", Value: "cn-north-1",
Help: "China (Beijing) Region", Help: "China (Beijing) Region",
}, { }, {
Value: "cn-northwest-1", Value: "cn-northwest-1",
Help: "China (Ningxia) Region.", Help: "China (Ningxia) Region",
}, { }, {
Value: "us-gov-east-1", Value: "us-gov-east-1",
Help: "AWS GovCloud (US-East) Region.", Help: "AWS GovCloud (US-East) Region",
}, { }, {
Value: "us-gov-west-1", Value: "us-gov-west-1",
Help: "AWS GovCloud (US) Region.", Help: "AWS GovCloud (US) Region",
}}, }},
}, { }, {
Name: "location_constraint", Name: "location_constraint",
Help: "Location constraint - must match endpoint when using IBM Cloud Public.\nFor on-prem COS, do not make a selection from this list, hit enter", Help: "Location constraint - must match endpoint when using IBM Cloud Public.\n\nFor on-prem COS, do not make a selection from this list, hit enter.",
Provider: "IBMCOS", Provider: "IBMCOS",
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "us-standard", Value: "us-standard",
@ -817,7 +817,7 @@ func init() {
}}, }},
}, { }, {
Name: "location_constraint", Name: "location_constraint",
Help: "Location constraint - must be set to match the Region.\nLeave blank if not sure. Used when creating buckets only.", Help: "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.",
Provider: "!AWS,IBMCOS,Alibaba,Scaleway,StackPath,TencentCOS", Provider: "!AWS,IBMCOS,Alibaba,Scaleway,StackPath,TencentCOS",
}, { }, {
Name: "acl", Name: "acl",
@ -831,27 +831,27 @@ Note that this ACL is applied when server-side copying objects as S3
doesn't copy the ACL from the source but rather writes a fresh one.`, doesn't copy the ACL from the source but rather writes a fresh one.`,
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "default", Value: "default",
Help: "Owner gets Full_CONTROL. No one else has access rights (default).", Help: "Owner gets Full_CONTROL.\nNo one else has access rights (default).",
Provider: "TencentCOS", Provider: "TencentCOS",
}, { }, {
Value: "private", Value: "private",
Help: "Owner gets FULL_CONTROL. No one else has access rights (default).", Help: "Owner gets FULL_CONTROL.\nNo one else has access rights (default).",
Provider: "!IBMCOS,TencentCOS", Provider: "!IBMCOS,TencentCOS",
}, { }, {
Value: "public-read", Value: "public-read",
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ access.", Help: "Owner gets FULL_CONTROL.\nThe AllUsers group gets READ access.",
Provider: "!IBMCOS", Provider: "!IBMCOS",
}, { }, {
Value: "public-read-write", Value: "public-read-write",
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.", Help: "Owner gets FULL_CONTROL.\nThe AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.",
Provider: "!IBMCOS", Provider: "!IBMCOS",
}, { }, {
Value: "authenticated-read", Value: "authenticated-read",
Help: "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access.", Help: "Owner gets FULL_CONTROL.\nThe AuthenticatedUsers group gets READ access.",
Provider: "!IBMCOS", Provider: "!IBMCOS",
}, { }, {
Value: "bucket-owner-read", Value: "bucket-owner-read",
Help: "Object owner gets FULL_CONTROL. Bucket owner gets READ access.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it.", Help: "Object owner gets FULL_CONTROL.\nBucket owner gets READ access.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it.",
Provider: "!IBMCOS", Provider: "!IBMCOS",
}, { }, {
Value: "bucket-owner-full-control", Value: "bucket-owner-full-control",
@ -859,19 +859,19 @@ doesn't copy the ACL from the source but rather writes a fresh one.`,
Provider: "!IBMCOS", Provider: "!IBMCOS",
}, { }, {
Value: "private", Value: "private",
Help: "Owner gets FULL_CONTROL. No one else has access rights (default). This acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise COS", Help: "Owner gets FULL_CONTROL.\nNo one else has access rights (default).\nThis acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise COS.",
Provider: "IBMCOS", Provider: "IBMCOS",
}, { }, {
Value: "public-read", Value: "public-read",
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ access. This acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise IBM COS", Help: "Owner gets FULL_CONTROL.\nThe AllUsers group gets READ access.\nThis acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise IBM COS.",
Provider: "IBMCOS", Provider: "IBMCOS",
}, { }, {
Value: "public-read-write", Value: "public-read-write",
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access. This acl is available on IBM Cloud (Infra), On-Premise IBM COS", Help: "Owner gets FULL_CONTROL.\nThe AllUsers group gets READ and WRITE access.\nThis acl is available on IBM Cloud (Infra), On-Premise IBM COS.",
Provider: "IBMCOS", Provider: "IBMCOS",
}, { }, {
Value: "authenticated-read", Value: "authenticated-read",
Help: "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access. Not supported on Buckets. This acl is available on IBM Cloud (Infra) and On-Premise IBM COS", Help: "Owner gets FULL_CONTROL.\nThe AuthenticatedUsers group gets READ access.\nNot supported on Buckets.\nThis acl is available on IBM Cloud (Infra) and On-Premise IBM COS.",
Provider: "IBMCOS", Provider: "IBMCOS",
}}, }},
}, { }, {
@ -885,16 +885,16 @@ isn't set then "acl" is used instead.`,
Advanced: true, Advanced: true,
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "private", Value: "private",
Help: "Owner gets FULL_CONTROL. No one else has access rights (default).", Help: "Owner gets FULL_CONTROL.\nNo one else has access rights (default).",
}, { }, {
Value: "public-read", Value: "public-read",
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ access.", Help: "Owner gets FULL_CONTROL.\nThe AllUsers group gets READ access.",
}, { }, {
Value: "public-read-write", Value: "public-read-write",
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.", Help: "Owner gets FULL_CONTROL.\nThe AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.",
}, { }, {
Value: "authenticated-read", Value: "authenticated-read",
Help: "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access.", Help: "Owner gets FULL_CONTROL.\nThe AuthenticatedUsers group gets READ access.",
}}, }},
}, { }, {
Name: "requester_pays", Name: "requester_pays",
@ -1002,10 +1002,10 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
Help: "Standard storage class", Help: "Standard storage class",
}, { }, {
Value: "GLACIER", Value: "GLACIER",
Help: "Archive storage mode.", Help: "Archive storage mode",
}, { }, {
Value: "STANDARD_IA", Value: "STANDARD_IA",
Help: "Infrequent access storage mode.", Help: "Infrequent access storage mode",
}}, }},
}, { }, {
// Mapping from here: https://intl.cloud.tencent.com/document/product/436/30925 // Mapping from here: https://intl.cloud.tencent.com/document/product/436/30925
@ -1020,10 +1020,10 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
Help: "Standard storage class", Help: "Standard storage class",
}, { }, {
Value: "ARCHIVE", Value: "ARCHIVE",
Help: "Archive storage mode.", Help: "Archive storage mode",
}, { }, {
Value: "STANDARD_IA", Value: "STANDARD_IA",
Help: "Infrequent access storage mode.", Help: "Infrequent access storage mode",
}}, }},
}, { }, {
// Mapping from here: https://www.scaleway.com/en/docs/object-storage-glacier/#-Scaleway-Storage-Classes // Mapping from here: https://www.scaleway.com/en/docs/object-storage-glacier/#-Scaleway-Storage-Classes
@ -1032,17 +1032,17 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
Provider: "Scaleway", Provider: "Scaleway",
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "", Value: "",
Help: "Default", Help: "Default.",
}, { }, {
Value: "STANDARD", Value: "STANDARD",
Help: "The Standard class for any upload; suitable for on-demand content like streaming or CDN.", Help: "The Standard class for any upload.\nSuitable for on-demand content like streaming or CDN.",
}, { }, {
Value: "GLACIER", Value: "GLACIER",
Help: "Archived storage; prices are lower, but it needs to be restored first to be accessed.", Help: "Archived storage.\nPrices are lower, but it needs to be restored first to be accessed.",
}}, }},
}, { }, {
Name: "upload_cutoff", Name: "upload_cutoff",
Help: `Cutoff for switching to chunked upload Help: `Cutoff for switching to chunked upload.
Any files larger than this will be uploaded in chunks of chunk_size. Any files larger than this will be uploaded in chunks of chunk_size.
The minimum is 0 and the maximum is 5 GiB.`, The minimum is 0 and the maximum is 5 GiB.`,
@ -1090,7 +1090,7 @@ large file of a known size to stay below this number of chunks limit.
Advanced: true, Advanced: true,
}, { }, {
Name: "copy_cutoff", Name: "copy_cutoff",
Help: `Cutoff for switching to multipart copy Help: `Cutoff for switching to multipart copy.
Any files larger than this that need to be server-side copied will be Any files larger than this that need to be server-side copied will be
copied in chunks of this size. copied in chunks of this size.
@ -1100,7 +1100,7 @@ The minimum is 0 and the maximum is 5 GiB.`,
Advanced: true, Advanced: true,
}, { }, {
Name: "disable_checksum", Name: "disable_checksum",
Help: `Don't store MD5 checksum with object metadata Help: `Don't store MD5 checksum with object metadata.
Normally rclone will calculate the MD5 checksum of the input before Normally rclone will calculate the MD5 checksum of the input before
uploading it so it can add it to metadata on the object. This is great uploading it so it can add it to metadata on the object. This is great
@ -1110,7 +1110,7 @@ to start uploading.`,
Advanced: true, Advanced: true,
}, { }, {
Name: "shared_credentials_file", Name: "shared_credentials_file",
Help: `Path to the shared credentials file Help: `Path to the shared credentials file.
If env_auth = true then rclone can use a shared credentials file. If env_auth = true then rclone can use a shared credentials file.
@ -1124,7 +1124,7 @@ it will default to the current user's home directory.
Advanced: true, Advanced: true,
}, { }, {
Name: "profile", Name: "profile",
Help: `Profile to use in the shared credentials file Help: `Profile to use in the shared credentials file.
If env_auth = true then rclone can use a shared credentials file. This If env_auth = true then rclone can use a shared credentials file. This
variable controls which profile is used in that file. variable controls which profile is used in that file.
@ -1135,7 +1135,7 @@ If empty it will default to the environment variable "AWS_PROFILE" or
Advanced: true, Advanced: true,
}, { }, {
Name: "session_token", Name: "session_token",
Help: "An AWS session token", Help: "An AWS session token.",
Advanced: true, Advanced: true,
}, { }, {
Name: "upload_concurrency", Name: "upload_concurrency",
@ -1205,7 +1205,7 @@ In Ceph, this can be increased with the "rgw list buckets max chunk" option.
Advanced: true, Advanced: true,
}, { }, {
Name: "no_check_bucket", Name: "no_check_bucket",
Help: `If set, don't attempt to check the bucket exists or create it Help: `If set, don't attempt to check the bucket exists or create it.
This can be useful when trying to minimise the number of transactions This can be useful when trying to minimise the number of transactions
rclone does if you know the bucket exists already. rclone does if you know the bucket exists already.
@ -1218,7 +1218,7 @@ due to a bug.
Advanced: true, Advanced: true,
}, { }, {
Name: "no_head", Name: "no_head",
Help: `If set, don't HEAD uploaded objects to check integrity Help: `If set, don't HEAD uploaded objects to check integrity.
This can be useful when trying to minimise the number of transactions This can be useful when trying to minimise the number of transactions
rclone does. rclone does.
@ -1276,6 +1276,7 @@ very small even with this flag.
Default: memoryPoolFlushTime, Default: memoryPoolFlushTime,
Advanced: true, Advanced: true,
Help: `How often internal memory buffer pools will be flushed. Help: `How often internal memory buffer pools will be flushed.
Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
This option controls how often unused buffers will be removed from the pool.`, This option controls how often unused buffers will be removed from the pool.`,
}, { }, {
@ -1287,7 +1288,7 @@ This option controls how often unused buffers will be removed from the pool.`,
Name: "disable_http2", Name: "disable_http2",
Default: false, Default: false,
Advanced: true, Advanced: true,
Help: `Disable usage of http2 for S3 backends Help: `Disable usage of http2 for S3 backends.
There is currently an unsolved issue with the s3 (specifically minio) backend There is currently an unsolved issue with the s3 (specifically minio) backend
and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be

View File

@ -60,41 +60,41 @@ func init() {
Config: Config, Config: Config,
Options: []fs.Option{{ Options: []fs.Option{{
Name: configURL, Name: configURL,
Help: "URL of seafile host to connect to", Help: "URL of seafile host to connect to.",
Required: true, Required: true,
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "https://cloud.seafile.com/", Value: "https://cloud.seafile.com/",
Help: "Connect to cloud.seafile.com", Help: "Connect to cloud.seafile.com.",
}}, }},
}, { }, {
Name: configUser, Name: configUser,
Help: "User name (usually email address)", Help: "User name (usually email address).",
Required: true, Required: true,
}, { }, {
// Password is not required, it will be left blank for 2FA // Password is not required, it will be left blank for 2FA
Name: configPassword, Name: configPassword,
Help: "Password", Help: "Password.",
IsPassword: true, IsPassword: true,
}, { }, {
Name: config2FA, Name: config2FA,
Help: "Two-factor authentication ('true' if the account has 2FA enabled)", Help: "Two-factor authentication ('true' if the account has 2FA enabled).",
Default: false, Default: false,
}, { }, {
Name: configLibrary, Name: configLibrary,
Help: "Name of the library. Leave blank to access all non-encrypted libraries.", Help: "Name of the library.\n\nLeave blank to access all non-encrypted libraries.",
}, { }, {
Name: configLibraryKey, Name: configLibraryKey,
Help: "Library password (for encrypted libraries only). Leave blank if you pass it through the command line.", Help: "Library password (for encrypted libraries only).\n\nLeave blank if you pass it through the command line.",
IsPassword: true, IsPassword: true,
}, { }, {
Name: configCreateLibrary, Name: configCreateLibrary,
Help: "Should rclone create a library if it doesn't exist", Help: "Should rclone create a library if it doesn't exist.",
Advanced: true, Advanced: true,
Default: false, Default: false,
}, { }, {
// Keep the authentication token after entering the 2FA code // Keep the authentication token after entering the 2FA code
Name: configAuthToken, Name: configAuthToken,
Help: "Authentication token", Help: "Authentication token.",
Hide: fs.OptionHideBoth, Hide: fs.OptionHideBoth,
}, { }, {
Name: config.ConfigEncoding, Name: config.ConfigEncoding,

View File

@ -56,28 +56,28 @@ func init() {
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "host", Name: "host",
Help: "SSH host to connect to", Help: "SSH host to connect to.",
Required: true, Required: true,
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "example.com", Value: "example.com",
Help: "Connect to example.com", Help: "Connect to example.com.",
}}, }},
}, { }, {
Name: "user", Name: "user",
Help: "SSH username, leave blank for current username, " + currentUser, Help: "SSH username, leave blank for current username, " + currentUser + ".",
}, { }, {
Name: "port", Name: "port",
Help: "SSH port, leave blank to use default (22)", Help: "SSH port, leave blank to use default (22).",
}, { }, {
Name: "pass", Name: "pass",
Help: "SSH password, leave blank to use ssh-agent.", Help: "SSH password, leave blank to use ssh-agent.",
IsPassword: true, IsPassword: true,
}, { }, {
Name: "key_pem", Name: "key_pem",
Help: "Raw PEM-encoded private key, If specified, will override key_file parameter.", Help: "Raw PEM-encoded private key.\n\nIf specified, will override key_file parameter.",
}, { }, {
Name: "key_file", Name: "key_file",
Help: "Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent." + env.ShellExpandHelp, Help: "Path to PEM-encoded private key file.\n\nLeave blank or set key-use-agent to use ssh-agent." + env.ShellExpandHelp,
}, { }, {
Name: "key_file_pass", Name: "key_file_pass",
Help: `The passphrase to decrypt the PEM-encoded private key file. Help: `The passphrase to decrypt the PEM-encoded private key file.
@ -98,7 +98,7 @@ Set this value to enable server host key validation.` + env.ShellExpandHelp,
Advanced: true, Advanced: true,
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "~/.ssh/known_hosts", Value: "~/.ssh/known_hosts",
Help: "Use OpenSSH's known_hosts file", Help: "Use OpenSSH's known_hosts file.",
}}, }},
}, { }, {
Name: "key_use_agent", Name: "key_use_agent",
@ -135,7 +135,7 @@ Those algorithms are insecure and may allow plaintext data to be recovered by an
}, { }, {
Name: "disable_hashcheck", Name: "disable_hashcheck",
Default: false, Default: false,
Help: "Disable the execution of SSH commands to determine if remote file hashing is available.\nLeave blank or set to false to enable hashing (recommended), set to true to disable hashing.", Help: "Disable the execution of SSH commands to determine if remote file hashing is available.\n\nLeave blank or set to false to enable hashing (recommended), set to true to disable hashing.",
}, { }, {
Name: "ask_password", Name: "ask_password",
Default: false, Default: false,
@ -170,12 +170,12 @@ Home directory can be found in a shared folder called "home"
}, { }, {
Name: "md5sum_command", Name: "md5sum_command",
Default: "", Default: "",
Help: "The command used to read md5 hashes. Leave blank for autodetect.", Help: "The command used to read md5 hashes.\n\nLeave blank for autodetect.",
Advanced: true, Advanced: true,
}, { }, {
Name: "sha1sum_command", Name: "sha1sum_command",
Default: "", Default: "",
Help: "The command used to read sha1 hashes. Leave blank for autodetect.", Help: "The command used to read sha1 hashes.\n\nLeave blank for autodetect.",
Advanced: true, Advanced: true,
}, { }, {
Name: "skip_links", Name: "skip_links",
@ -197,7 +197,7 @@ The subsystem option is ignored when server_command is defined.`,
}, { }, {
Name: "use_fstat", Name: "use_fstat",
Default: false, Default: false,
Help: `If set use fstat instead of stat Help: `If set use fstat instead of stat.
Some servers limit the amount of open files and calling Stat after opening Some servers limit the amount of open files and calling Stat after opening
the file will throw an error from the server. Setting this flag will call the file will throw an error from the server. Setting this flag will call
@ -211,7 +211,7 @@ any given time.
}, { }, {
Name: "disable_concurrent_reads", Name: "disable_concurrent_reads",
Default: false, Default: false,
Help: `If set don't use concurrent reads Help: `If set don't use concurrent reads.
Normally concurrent reads are safe to use and not using them will Normally concurrent reads are safe to use and not using them will
degrade performance, so this option is disabled by default. degrade performance, so this option is disabled by default.
@ -230,7 +230,7 @@ If concurrent reads are disabled, the use_fstat option is ignored.
}, { }, {
Name: "disable_concurrent_writes", Name: "disable_concurrent_writes",
Default: false, Default: false,
Help: `If set don't use concurrent writes Help: `If set don't use concurrent writes.
Normally rclone uses concurrent writes to upload files. This improves Normally rclone uses concurrent writes to upload files. This improves
the performance greatly, especially for distant servers. the performance greatly, especially for distant servers.
@ -241,7 +241,7 @@ This option disables concurrent writes should that be necessary.
}, { }, {
Name: "idle_timeout", Name: "idle_timeout",
Default: fs.Duration(60 * time.Second), Default: fs.Duration(60 * time.Second),
Help: `Max time before closing idle connections Help: `Max time before closing idle connections.
If no connections have been returned to the connection pool in the time If no connections have been returned to the connection pool in the time
given, rclone will empty the connection pool. given, rclone will empty the connection pool.

View File

@ -163,13 +163,13 @@ func init() {
Advanced: true, Advanced: true,
}, { }, {
Name: "root_folder_id", Name: "root_folder_id",
Help: `ID of the root folder Help: `ID of the root folder.
Leave blank to access "Personal Folders". You can use one of the Leave blank to access "Personal Folders". You can use one of the
standard values here or any folder ID (long hex number ID).`, standard values here or any folder ID (long hex number ID).`,
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "", Value: "",
Help: `Access the Personal Folders. (Default)`, Help: `Access the Personal Folders (default).`,
}, { }, {
Value: "favorites", Value: "favorites",
Help: "Access the Favorites folder.", Help: "Access the Favorites folder.",
@ -186,7 +186,9 @@ standard values here or any folder ID (long hex number ID).`,
}, { }, {
Name: "chunk_size", Name: "chunk_size",
Default: defaultChunkSize, Default: defaultChunkSize,
Help: `Upload chunk size. Must a power of 2 >= 256k. Help: `Upload chunk size.
Must a power of 2 >= 256k.
Making this larger will improve performance, but note that each chunk Making this larger will improve performance, but note that each chunk
is buffered in memory one per transfer. is buffered in memory one per transfer.

View File

@ -139,34 +139,34 @@ func init() {
Help: "Sugarsync Access Key ID.\n\nLeave blank to use rclone's.", Help: "Sugarsync Access Key ID.\n\nLeave blank to use rclone's.",
}, { }, {
Name: "private_access_key", Name: "private_access_key",
Help: "Sugarsync Private Access Key\n\nLeave blank to use rclone's.", Help: "Sugarsync Private Access Key.\n\nLeave blank to use rclone's.",
}, { }, {
Name: "hard_delete", Name: "hard_delete",
Help: "Permanently delete files if true\notherwise put them in the deleted files.", Help: "Permanently delete files if true\notherwise put them in the deleted files.",
Default: false, Default: false,
}, { }, {
Name: "refresh_token", Name: "refresh_token",
Help: "Sugarsync refresh token\n\nLeave blank normally, will be auto configured by rclone.", Help: "Sugarsync refresh token.\n\nLeave blank normally, will be auto configured by rclone.",
Advanced: true, Advanced: true,
}, { }, {
Name: "authorization", Name: "authorization",
Help: "Sugarsync authorization\n\nLeave blank normally, will be auto configured by rclone.", Help: "Sugarsync authorization.\n\nLeave blank normally, will be auto configured by rclone.",
Advanced: true, Advanced: true,
}, { }, {
Name: "authorization_expiry", Name: "authorization_expiry",
Help: "Sugarsync authorization expiry\n\nLeave blank normally, will be auto configured by rclone.", Help: "Sugarsync authorization expiry.\n\nLeave blank normally, will be auto configured by rclone.",
Advanced: true, Advanced: true,
}, { }, {
Name: "user", Name: "user",
Help: "Sugarsync user\n\nLeave blank normally, will be auto configured by rclone.", Help: "Sugarsync user.\n\nLeave blank normally, will be auto configured by rclone.",
Advanced: true, Advanced: true,
}, { }, {
Name: "root_id", Name: "root_id",
Help: "Sugarsync root id\n\nLeave blank normally, will be auto configured by rclone.", Help: "Sugarsync root id.\n\nLeave blank normally, will be auto configured by rclone.",
Advanced: true, Advanced: true,
}, { }, {
Name: "deleted_id", Name: "deleted_id",
Help: "Sugarsync deleted folder id\n\nLeave blank normally, will be auto configured by rclone.", Help: "Sugarsync deleted folder id.\n\nLeave blank normally, will be auto configured by rclone.",
Advanced: true, Advanced: true,
}, { }, {
Name: config.ConfigEncoding, Name: config.ConfigEncoding,

View File

@ -84,10 +84,10 @@ func init() {
Examples: []fs.OptionExample{ Examples: []fs.OptionExample{
{ {
Value: "false", Value: "false",
Help: "Enter swift credentials in the next step", Help: "Enter swift credentials in the next step.",
}, { }, {
Value: "true", Value: "true",
Help: "Get swift credentials from environment vars. Leave other fields blank if using this.", Help: "Get swift credentials from environment vars.\nLeave other fields blank if using this.",
}, },
}, },
}, { }, {
@ -100,23 +100,23 @@ func init() {
Name: "auth", Name: "auth",
Help: "Authentication URL for server (OS_AUTH_URL).", Help: "Authentication URL for server (OS_AUTH_URL).",
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Help: "Rackspace US",
Value: "https://auth.api.rackspacecloud.com/v1.0", Value: "https://auth.api.rackspacecloud.com/v1.0",
Help: "Rackspace US",
}, { }, {
Help: "Rackspace UK",
Value: "https://lon.auth.api.rackspacecloud.com/v1.0", Value: "https://lon.auth.api.rackspacecloud.com/v1.0",
Help: "Rackspace UK",
}, { }, {
Help: "Rackspace v2",
Value: "https://identity.api.rackspacecloud.com/v2.0", Value: "https://identity.api.rackspacecloud.com/v2.0",
Help: "Rackspace v2",
}, { }, {
Help: "Memset Memstore UK",
Value: "https://auth.storage.memset.com/v1.0", Value: "https://auth.storage.memset.com/v1.0",
Help: "Memset Memstore UK",
}, { }, {
Help: "Memset Memstore UK v2",
Value: "https://auth.storage.memset.com/v2.0", Value: "https://auth.storage.memset.com/v2.0",
Help: "Memset Memstore UK v2",
}, { }, {
Help: "OVH",
Value: "https://auth.cloud.ovh.net/v3", Value: "https://auth.cloud.ovh.net/v3",
Help: "OVH",
}}, }},
}, { }, {
Name: "user_id", Name: "user_id",
@ -126,57 +126,59 @@ func init() {
Help: "User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)", Help: "User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)",
}, { }, {
Name: "tenant", Name: "tenant",
Help: "Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)", Help: "Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME).",
}, { }, {
Name: "tenant_id", Name: "tenant_id",
Help: "Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)", Help: "Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID).",
}, { }, {
Name: "tenant_domain", Name: "tenant_domain",
Help: "Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)", Help: "Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME).",
}, { }, {
Name: "region", Name: "region",
Help: "Region name - optional (OS_REGION_NAME)", Help: "Region name - optional (OS_REGION_NAME).",
}, { }, {
Name: "storage_url", Name: "storage_url",
Help: "Storage URL - optional (OS_STORAGE_URL)", Help: "Storage URL - optional (OS_STORAGE_URL).",
}, { }, {
Name: "auth_token", Name: "auth_token",
Help: "Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)", Help: "Auth Token from alternate authentication - optional (OS_AUTH_TOKEN).",
}, { }, {
Name: "application_credential_id", Name: "application_credential_id",
Help: "Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)", Help: "Application Credential ID (OS_APPLICATION_CREDENTIAL_ID).",
}, { }, {
Name: "application_credential_name", Name: "application_credential_name",
Help: "Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)", Help: "Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME).",
}, { }, {
Name: "application_credential_secret", Name: "application_credential_secret",
Help: "Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)", Help: "Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET).",
}, { }, {
Name: "auth_version", Name: "auth_version",
Help: "AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)", Help: "AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION).",
Default: 0, Default: 0,
}, { }, {
Name: "endpoint_type", Name: "endpoint_type",
Help: "Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE)", Help: "Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE).",
Default: "public", Default: "public",
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Help: "Public (default, choose this if not sure)",
Value: "public", Value: "public",
Help: "Public (default, choose this if not sure)",
}, { }, {
Help: "Internal (use internal service net)",
Value: "internal", Value: "internal",
Help: "Internal (use internal service net)",
}, { }, {
Help: "Admin",
Value: "admin", Value: "admin",
Help: "Admin",
}}, }},
}, { }, {
Name: "leave_parts_on_error", Name: "leave_parts_on_error",
Help: `If true avoid calling abort upload on a failure. It should be set to true for resuming uploads across different sessions.`, Help: `If true avoid calling abort upload on a failure.
It should be set to true for resuming uploads across different sessions.`,
Default: false, Default: false,
Advanced: true, Advanced: true,
}, { }, {
Name: "storage_policy", Name: "storage_policy",
Help: `The storage policy to use when creating a new container Help: `The storage policy to use when creating a new container.
This applies the specified storage policy when creating a new This applies the specified storage policy when creating a new
container. The policy cannot be changed afterwards. The allowed container. The policy cannot be changed afterwards. The allowed
@ -184,14 +186,14 @@ configuration values and their meaning depend on your Swift storage
provider.`, provider.`,
Default: "", Default: "",
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Help: "Default",
Value: "", Value: "",
Help: "Default",
}, { }, {
Help: "OVH Public Cloud Storage",
Value: "pcs", Value: "pcs",
Help: "OVH Public Cloud Storage",
}, { }, {
Help: "OVH Public Cloud Archive",
Value: "pca", Value: "pca",
Help: "OVH Public Cloud Archive",
}}, }},
}}, SharedOptions...), }}, SharedOptions...),
}) })

View File

@ -98,13 +98,13 @@ func init() {
}}, }},
{ {
Name: "access_grant", Name: "access_grant",
Help: "Access Grant.", Help: "Access grant.",
Required: false, Required: false,
Provider: "existing", Provider: "existing",
}, },
{ {
Name: "satellite_address", Name: "satellite_address",
Help: "Satellite Address. Custom satellite address should match the format: `<nodeid>@<address>:<port>`.", Help: "Satellite address.\n\nCustom satellite address should match the format: `<nodeid>@<address>:<port>`.",
Required: false, Required: false,
Provider: newProvider, Provider: newProvider,
Default: "us-central-1.tardigrade.io", Default: "us-central-1.tardigrade.io",
@ -122,13 +122,13 @@ func init() {
}, },
{ {
Name: "api_key", Name: "api_key",
Help: "API Key.", Help: "API key.",
Required: false, Required: false,
Provider: newProvider, Provider: newProvider,
}, },
{ {
Name: "passphrase", Name: "passphrase",
Help: "Encryption Passphrase. To access existing objects enter passphrase used for uploading.", Help: "Encryption passphrase.\n\nTo access existing objects enter passphrase used for uploading.",
Required: false, Required: false,
Provider: newProvider, Provider: newProvider,
}, },

View File

@ -30,7 +30,7 @@ func init() {
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "upstreams", Name: "upstreams",
Help: "List of space separated upstreams.\nCan be 'upstreama:test/dir upstreamb:', '\"upstreama:test/space:ro dir\" upstreamb:', etc.\n", Help: "List of space separated upstreams.\n\nCan be 'upstreama:test/dir upstreamb:', '\"upstreama:test/space:ro dir\" upstreamb:', etc.",
Required: true, Required: true,
}, { }, {
Name: "action_policy", Name: "action_policy",
@ -49,7 +49,7 @@ func init() {
Default: "ff", Default: "ff",
}, { }, {
Name: "cache_time", Name: "cache_time",
Help: "Cache time of usage and free space (in seconds). This option is only useful when a path preserving policy is used.", Help: "Cache time of usage and free space (in seconds).\n\nThis option is only useful when a path preserving policy is used.",
Required: true, Required: true,
Default: 120, Default: 120,
}}, }},

View File

@ -43,7 +43,7 @@ func init() {
Description: "Uptobox", Description: "Uptobox",
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Help: "Your access Token, get it from https://uptobox.com/my_account", Help: "Your access token.\n\nGet it from https://uptobox.com/my_account.",
Name: "access_token", Name: "access_token",
}, { }, {
Name: config.ConfigEncoding, Name: config.ConfigEncoding,

View File

@ -70,15 +70,15 @@ func init() {
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "url", Name: "url",
Help: "URL of http host to connect to", Help: "URL of http host to connect to.",
Required: true, Required: true,
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "https://example.com", Value: "https://example.com",
Help: "Connect to example.com", Help: "Connect to example.com.",
}}, }},
}, { }, {
Name: "vendor", Name: "vendor",
Help: "Name of the Webdav site/service/software you are using", Help: "Name of the Webdav site/service/software you are using.",
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "nextcloud", Value: "nextcloud",
Help: "Nextcloud", Help: "Nextcloud",
@ -87,27 +87,27 @@ func init() {
Help: "Owncloud", Help: "Owncloud",
}, { }, {
Value: "sharepoint", Value: "sharepoint",
Help: "Sharepoint Online, authenticated by Microsoft account.", Help: "Sharepoint Online, authenticated by Microsoft account",
}, { }, {
Value: "sharepoint-ntlm", Value: "sharepoint-ntlm",
Help: "Sharepoint with NTLM authentication. Usually self-hosted or on-premises.", Help: "Sharepoint with NTLM authentication, usually self-hosted or on-premises",
}, { }, {
Value: "other", Value: "other",
Help: "Other site/service or software", Help: "Other site/service or software",
}}, }},
}, { }, {
Name: "user", Name: "user",
Help: "User name. In case NTLM authentication is used, the username should be in the format 'Domain\\User'.", Help: "User name.\n\nIn case NTLM authentication is used, the username should be in the format 'Domain\\User'.",
}, { }, {
Name: "pass", Name: "pass",
Help: "Password.", Help: "Password.",
IsPassword: true, IsPassword: true,
}, { }, {
Name: "bearer_token", Name: "bearer_token",
Help: "Bearer token instead of user/pass (e.g. a Macaroon)", Help: "Bearer token instead of user/pass (e.g. a Macaroon).",
}, { }, {
Name: "bearer_token_command", Name: "bearer_token_command",
Help: "Command to run to get a bearer token", Help: "Command to run to get a bearer token.",
Advanced: true, Advanced: true,
}, { }, {
Name: config.ConfigEncoding, Name: config.ConfigEncoding,
@ -115,7 +115,7 @@ func init() {
Advanced: true, Advanced: true,
}, { }, {
Name: "headers", Name: "headers",
Help: `Set HTTP headers for all transactions Help: `Set HTTP headers for all transactions.
Use this to set additional HTTP headers for all transactions Use this to set additional HTTP headers for all transactions

View File

@ -24,8 +24,8 @@ var (
func init() { func init() {
cmd.Root.AddCommand(commandDefinition) cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags() cmdFlags := commandDefinition.Flags()
flags.StringArrayVarP(cmdFlags, &options, "option", "o", options, "Option in the form name=value or name.") flags.StringArrayVarP(cmdFlags, &options, "option", "o", options, "Option in the form name=value or name")
flags.BoolVarP(cmdFlags, &useJSON, "json", "", useJSON, "Always output in JSON format.") flags.BoolVarP(cmdFlags, &useJSON, "json", "", useJSON, "Always output in JSON format")
} }
var commandDefinition = &cobra.Command{ var commandDefinition = &cobra.Command{

View File

@ -26,11 +26,11 @@ var (
func init() { func init() {
cmd.Root.AddCommand(commandDefinition) cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags() cmdFlags := commandDefinition.Flags()
flags.Int64VarP(cmdFlags, &head, "head", "", head, "Only print the first N characters.") flags.Int64VarP(cmdFlags, &head, "head", "", head, "Only print the first N characters")
flags.Int64VarP(cmdFlags, &tail, "tail", "", tail, "Only print the last N characters.") flags.Int64VarP(cmdFlags, &tail, "tail", "", tail, "Only print the last N characters")
flags.Int64VarP(cmdFlags, &offset, "offset", "", offset, "Start printing at offset N (or from end if -ve).") flags.Int64VarP(cmdFlags, &offset, "offset", "", offset, "Start printing at offset N (or from end if -ve)")
flags.Int64VarP(cmdFlags, &count, "count", "", count, "Only print N characters.") flags.Int64VarP(cmdFlags, &count, "count", "", count, "Only print N characters")
flags.BoolVarP(cmdFlags, &discard, "discard", "", discard, "Discard the output instead of printing.") flags.BoolVarP(cmdFlags, &discard, "discard", "", discard, "Discard the output instead of printing")
} }
var commandDefinition = &cobra.Command{ var commandDefinition = &cobra.Command{

View File

@ -32,7 +32,7 @@ var (
func init() { func init() {
cmd.Root.AddCommand(commandDefinition) cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags() cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &download, "download", "", download, "Check by downloading rather than with hash.") flags.BoolVarP(cmdFlags, &download, "download", "", download, "Check by downloading rather than with hash")
flags.StringVarP(cmdFlags, &checkFileHashType, "checkfile", "C", checkFileHashType, "Treat source:path as a SUM file with hashes of given type") flags.StringVarP(cmdFlags, &checkFileHashType, "checkfile", "C", checkFileHashType, "Treat source:path as a SUM file with hashes of given type")
AddFlags(cmdFlags) AddFlags(cmdFlags)
} }

View File

@ -18,7 +18,7 @@ var download = false
func init() { func init() {
cmd.Root.AddCommand(commandDefinition) cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags() cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &download, "download", "", download, "Check by hashing the contents.") flags.BoolVarP(cmdFlags, &download, "download", "", download, "Check by hashing the contents")
check.AddFlags(cmdFlags) check.AddFlags(cmdFlags)
} }

View File

@ -49,11 +49,11 @@ var (
// Flags // Flags
cpuProfile = flags.StringP("cpuprofile", "", "", "Write cpu profile to file") cpuProfile = flags.StringP("cpuprofile", "", "", "Write cpu profile to file")
memProfile = flags.StringP("memprofile", "", "", "Write memory profile to file") memProfile = flags.StringP("memprofile", "", "", "Write memory profile to file")
statsInterval = flags.DurationP("stats", "", time.Minute*1, "Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable)") statsInterval = flags.DurationP("stats", "", time.Minute*1, "Interval between printing stats, e.g. 500ms, 60s, 5m (0 to disable)")
dataRateUnit = flags.StringP("stats-unit", "", "bytes", "Show data rate in stats as either 'bits' or 'bytes' per second") dataRateUnit = flags.StringP("stats-unit", "", "bytes", "Show data rate in stats as either 'bits' or 'bytes' per second")
version bool version bool
retries = flags.IntP("retries", "", 3, "Retry operations this many times if they fail") retries = flags.IntP("retries", "", 3, "Retry operations this many times if they fail")
retriesInterval = flags.DurationP("retries-sleep", "", 0, "Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)") retriesInterval = flags.DurationP("retries-sleep", "", 0, "Interval between retrying operations if they fail, e.g. 500ms, 60s, 5m (0 to disable)")
// Errors // Errors
errorCommandNotFound = errors.New("command not found") errorCommandNotFound = errors.New("command not found")
errorUncategorized = errors.New("uncategorized error") errorUncategorized = errors.New("uncategorized error")

View File

@ -260,13 +260,13 @@ func doConfig(name string, in rc.Params, do func(config.UpdateRemoteOpt) (*fs.Co
func init() { func init() {
for _, cmdFlags := range []*pflag.FlagSet{configCreateCommand.Flags(), configUpdateCommand.Flags()} { for _, cmdFlags := range []*pflag.FlagSet{configCreateCommand.Flags(), configUpdateCommand.Flags()} {
flags.BoolVarP(cmdFlags, &updateRemoteOpt.Obscure, "obscure", "", false, "Force any passwords to be obscured.") flags.BoolVarP(cmdFlags, &updateRemoteOpt.Obscure, "obscure", "", false, "Force any passwords to be obscured")
flags.BoolVarP(cmdFlags, &updateRemoteOpt.NoObscure, "no-obscure", "", false, "Force any passwords not to be obscured.") flags.BoolVarP(cmdFlags, &updateRemoteOpt.NoObscure, "no-obscure", "", false, "Force any passwords not to be obscured")
flags.BoolVarP(cmdFlags, &updateRemoteOpt.NonInteractive, "non-interactive", "", false, "Don't interact with user and return questions.") flags.BoolVarP(cmdFlags, &updateRemoteOpt.NonInteractive, "non-interactive", "", false, "Don't interact with user and return questions")
flags.BoolVarP(cmdFlags, &updateRemoteOpt.Continue, "continue", "", false, "Continue the configuration process with an answer.") flags.BoolVarP(cmdFlags, &updateRemoteOpt.Continue, "continue", "", false, "Continue the configuration process with an answer")
flags.BoolVarP(cmdFlags, &updateRemoteOpt.All, "all", "", false, "Ask the full set of config questions.") flags.BoolVarP(cmdFlags, &updateRemoteOpt.All, "all", "", false, "Ask the full set of config questions")
flags.StringVarP(cmdFlags, &updateRemoteOpt.State, "state", "", "", "State - use with --continue.") flags.StringVarP(cmdFlags, &updateRemoteOpt.State, "state", "", "", "State - use with --continue")
flags.StringVarP(cmdFlags, &updateRemoteOpt.Result, "result", "", "", "Result - use with --continue.") flags.StringVarP(cmdFlags, &updateRemoteOpt.Result, "result", "", "", "Result - use with --continue")
} }
} }

View File

@ -19,7 +19,7 @@ var (
func init() { func init() {
cmd.Root.AddCommand(commandDefinition) cmd.Root.AddCommand(commandDefinition)
cmdFlag := commandDefinition.Flags() cmdFlag := commandDefinition.Flags()
flags.FVarP(cmdFlag, &dedupeMode, "dedupe-mode", "", "Dedupe mode interactive|skip|first|newest|oldest|largest|smallest|rename.") flags.FVarP(cmdFlag, &dedupeMode, "dedupe-mode", "", "Dedupe mode interactive|skip|first|newest|oldest|largest|smallest|rename")
flags.BoolVarP(cmdFlag, &byHash, "by-hash", "", false, "Find indentical hashes rather than names") flags.BoolVarP(cmdFlag, &byHash, "by-hash", "", false, "Find indentical hashes rather than names")
} }

View File

@ -18,7 +18,7 @@ var (
func init() { func init() {
cmd.Root.AddCommand(commandDefinition) cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags() cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &listLong, "long", "", listLong, "Show the type as well as names.") flags.BoolVarP(cmdFlags, &listLong, "long", "", listLong, "Show the type as well as names")
} }
var commandDefinition = &cobra.Command{ var commandDefinition = &cobra.Command{

View File

@ -19,7 +19,7 @@ var (
func init() { func init() {
cmd.Root.AddCommand(commandDefinition) cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags() cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &recurse, "recursive", "R", false, "Recurse into the listing.") flags.BoolVarP(cmdFlags, &recurse, "recursive", "R", false, "Recurse into the listing")
} }
var commandDefinition = &cobra.Command{ var commandDefinition = &cobra.Command{

View File

@ -32,14 +32,14 @@ func init() {
cmd.Root.AddCommand(commandDefinition) cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags() cmdFlags := commandDefinition.Flags()
flags.StringVarP(cmdFlags, &format, "format", "F", "p", "Output format - see help for details") flags.StringVarP(cmdFlags, &format, "format", "F", "p", "Output format - see help for details")
flags.StringVarP(cmdFlags, &separator, "separator", "s", ";", "Separator for the items in the format.") flags.StringVarP(cmdFlags, &separator, "separator", "s", ";", "Separator for the items in the format")
flags.BoolVarP(cmdFlags, &dirSlash, "dir-slash", "d", true, "Append a slash to directory names.") flags.BoolVarP(cmdFlags, &dirSlash, "dir-slash", "d", true, "Append a slash to directory names")
flags.FVarP(cmdFlags, &hashType, "hash", "", "Use this hash when `h` is used in the format MD5|SHA-1|DropboxHash") flags.FVarP(cmdFlags, &hashType, "hash", "", "Use this hash when `h` is used in the format MD5|SHA-1|DropboxHash")
flags.BoolVarP(cmdFlags, &filesOnly, "files-only", "", false, "Only list files.") flags.BoolVarP(cmdFlags, &filesOnly, "files-only", "", false, "Only list files")
flags.BoolVarP(cmdFlags, &dirsOnly, "dirs-only", "", false, "Only list directories.") flags.BoolVarP(cmdFlags, &dirsOnly, "dirs-only", "", false, "Only list directories")
flags.BoolVarP(cmdFlags, &csv, "csv", "", false, "Output in CSV format.") flags.BoolVarP(cmdFlags, &csv, "csv", "", false, "Output in CSV format")
flags.BoolVarP(cmdFlags, &absolute, "absolute", "", false, "Put a leading / in front of path names.") flags.BoolVarP(cmdFlags, &absolute, "absolute", "", false, "Put a leading / in front of path names")
flags.BoolVarP(cmdFlags, &recurse, "recursive", "R", false, "Recurse into the listing.") flags.BoolVarP(cmdFlags, &recurse, "recursive", "R", false, "Recurse into the listing")
} }
var commandDefinition = &cobra.Command{ var commandDefinition = &cobra.Command{

View File

@ -23,16 +23,16 @@ var (
func init() { func init() {
cmd.Root.AddCommand(commandDefinition) cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags() cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &opt.Recurse, "recursive", "R", false, "Recurse into the listing.") flags.BoolVarP(cmdFlags, &opt.Recurse, "recursive", "R", false, "Recurse into the listing")
flags.BoolVarP(cmdFlags, &opt.ShowHash, "hash", "", false, "Include hashes in the output (may take longer).") flags.BoolVarP(cmdFlags, &opt.ShowHash, "hash", "", false, "Include hashes in the output (may take longer)")
flags.BoolVarP(cmdFlags, &opt.NoModTime, "no-modtime", "", false, "Don't read the modification time (can speed things up).") flags.BoolVarP(cmdFlags, &opt.NoModTime, "no-modtime", "", false, "Don't read the modification time (can speed things up)")
flags.BoolVarP(cmdFlags, &opt.NoMimeType, "no-mimetype", "", false, "Don't read the mime type (can speed things up).") flags.BoolVarP(cmdFlags, &opt.NoMimeType, "no-mimetype", "", false, "Don't read the mime type (can speed things up)")
flags.BoolVarP(cmdFlags, &opt.ShowEncrypted, "encrypted", "M", false, "Show the encrypted names.") flags.BoolVarP(cmdFlags, &opt.ShowEncrypted, "encrypted", "M", false, "Show the encrypted names")
flags.BoolVarP(cmdFlags, &opt.ShowOrigIDs, "original", "", false, "Show the ID of the underlying Object.") flags.BoolVarP(cmdFlags, &opt.ShowOrigIDs, "original", "", false, "Show the ID of the underlying Object")
flags.BoolVarP(cmdFlags, &opt.FilesOnly, "files-only", "", false, "Show only files in the listing.") flags.BoolVarP(cmdFlags, &opt.FilesOnly, "files-only", "", false, "Show only files in the listing")
flags.BoolVarP(cmdFlags, &opt.DirsOnly, "dirs-only", "", false, "Show only directories in the listing.") flags.BoolVarP(cmdFlags, &opt.DirsOnly, "dirs-only", "", false, "Show only directories in the listing")
flags.StringArrayVarP(cmdFlags, &opt.HashTypes, "hash-type", "", nil, "Show only this hash type (may be repeated).") flags.StringArrayVarP(cmdFlags, &opt.HashTypes, "hash-type", "", nil, "Show only this hash type (may be repeated)")
flags.BoolVarP(cmdFlags, &statOnly, "stat", "", false, "Just return the info for the pointed to file.") flags.BoolVarP(cmdFlags, &statOnly, "stat", "", false, "Just return the info for the pointed to file")
} }
var commandDefinition = &cobra.Command{ var commandDefinition = &cobra.Command{

View File

@ -111,29 +111,29 @@ var Opt Options
// AddFlags adds the non filing system specific flags to the command // AddFlags adds the non filing system specific flags to the command
func AddFlags(flagSet *pflag.FlagSet) { func AddFlags(flagSet *pflag.FlagSet) {
rc.AddOption("mount", &Opt) rc.AddOption("mount", &Opt)
flags.BoolVarP(flagSet, &Opt.DebugFUSE, "debug-fuse", "", Opt.DebugFUSE, "Debug the FUSE internals - needs -v.") flags.BoolVarP(flagSet, &Opt.DebugFUSE, "debug-fuse", "", Opt.DebugFUSE, "Debug the FUSE internals - needs -v")
flags.DurationVarP(flagSet, &Opt.AttrTimeout, "attr-timeout", "", Opt.AttrTimeout, "Time for which file/directory attributes are cached.") flags.DurationVarP(flagSet, &Opt.AttrTimeout, "attr-timeout", "", Opt.AttrTimeout, "Time for which file/directory attributes are cached")
flags.StringArrayVarP(flagSet, &Opt.ExtraOptions, "option", "o", []string{}, "Option for libfuse/WinFsp. Repeat if required.") flags.StringArrayVarP(flagSet, &Opt.ExtraOptions, "option", "o", []string{}, "Option for libfuse/WinFsp (repeat if required)")
flags.StringArrayVarP(flagSet, &Opt.ExtraFlags, "fuse-flag", "", []string{}, "Flags or arguments to be passed direct to libfuse/WinFsp. Repeat if required.") flags.StringArrayVarP(flagSet, &Opt.ExtraFlags, "fuse-flag", "", []string{}, "Flags or arguments to be passed direct to libfuse/WinFsp (repeat if required)")
// Non-Windows only // Non-Windows only
flags.BoolVarP(flagSet, &Opt.Daemon, "daemon", "", Opt.Daemon, "Run mount in background and exit parent process. Not supported on Windows. As background output is suppressed, use --log-file with --log-format=pid,... to monitor.") flags.BoolVarP(flagSet, &Opt.Daemon, "daemon", "", Opt.Daemon, "Run mount in background and exit parent process (as background output is suppressed, use --log-file with --log-format=pid,... to monitor) (not supported on Windows)")
flags.DurationVarP(flagSet, &Opt.DaemonTimeout, "daemon-timeout", "", Opt.DaemonTimeout, "Time limit for rclone to respond to kernel. Not supported on Windows.") flags.DurationVarP(flagSet, &Opt.DaemonTimeout, "daemon-timeout", "", Opt.DaemonTimeout, "Time limit for rclone to respond to kernel (not supported on Windows)")
flags.BoolVarP(flagSet, &Opt.DefaultPermissions, "default-permissions", "", Opt.DefaultPermissions, "Makes kernel enforce access control based on the file mode. Not supported on Windows.") flags.BoolVarP(flagSet, &Opt.DefaultPermissions, "default-permissions", "", Opt.DefaultPermissions, "Makes kernel enforce access control based on the file mode (not supported on Windows)")
flags.BoolVarP(flagSet, &Opt.AllowNonEmpty, "allow-non-empty", "", Opt.AllowNonEmpty, "Allow mounting over a non-empty directory. Not supported on Windows.") flags.BoolVarP(flagSet, &Opt.AllowNonEmpty, "allow-non-empty", "", Opt.AllowNonEmpty, "Allow mounting over a non-empty directory (not supported on Windows)")
flags.BoolVarP(flagSet, &Opt.AllowRoot, "allow-root", "", Opt.AllowRoot, "Allow access to root user. Not supported on Windows.") flags.BoolVarP(flagSet, &Opt.AllowRoot, "allow-root", "", Opt.AllowRoot, "Allow access to root user (not supported on Windows)")
flags.BoolVarP(flagSet, &Opt.AllowOther, "allow-other", "", Opt.AllowOther, "Allow access to other users. Not supported on Windows.") flags.BoolVarP(flagSet, &Opt.AllowOther, "allow-other", "", Opt.AllowOther, "Allow access to other users (not supported on Windows)")
flags.BoolVarP(flagSet, &Opt.AsyncRead, "async-read", "", Opt.AsyncRead, "Use asynchronous reads. Not supported on Windows.") flags.BoolVarP(flagSet, &Opt.AsyncRead, "async-read", "", Opt.AsyncRead, "Use asynchronous reads (not supported on Windows)")
flags.FVarP(flagSet, &Opt.MaxReadAhead, "max-read-ahead", "", "The number of bytes that can be prefetched for sequential reads. Not supported on Windows.") flags.FVarP(flagSet, &Opt.MaxReadAhead, "max-read-ahead", "", "The number of bytes that can be prefetched for sequential reads (not supported on Windows)")
flags.BoolVarP(flagSet, &Opt.WritebackCache, "write-back-cache", "", Opt.WritebackCache, "Makes kernel buffer writes before sending them to rclone. Without this, writethrough caching is used. Not supported on Windows.") flags.BoolVarP(flagSet, &Opt.WritebackCache, "write-back-cache", "", Opt.WritebackCache, "Makes kernel buffer writes before sending them to rclone (without this, writethrough caching is used) (not supported on Windows)")
// Windows and OSX // Windows and OSX
flags.StringVarP(flagSet, &Opt.VolumeName, "volname", "", Opt.VolumeName, "Set the volume name. Supported on Windows and OSX only.") flags.StringVarP(flagSet, &Opt.VolumeName, "volname", "", Opt.VolumeName, "Set the volume name (supported on Windows and OSX only)")
// OSX only // OSX only
flags.BoolVarP(flagSet, &Opt.NoAppleDouble, "noappledouble", "", Opt.NoAppleDouble, "Ignore Apple Double (._) and .DS_Store files. Supported on OSX only.") flags.BoolVarP(flagSet, &Opt.NoAppleDouble, "noappledouble", "", Opt.NoAppleDouble, "Ignore Apple Double (._) and .DS_Store files (supported on OSX only)")
flags.BoolVarP(flagSet, &Opt.NoAppleXattr, "noapplexattr", "", Opt.NoAppleXattr, "Ignore all \"com.apple.*\" extended attributes. Supported on OSX only.") flags.BoolVarP(flagSet, &Opt.NoAppleXattr, "noapplexattr", "", Opt.NoAppleXattr, "Ignore all \"com.apple.*\" extended attributes (supported on OSX only)")
// Windows only // Windows only
flags.BoolVarP(flagSet, &Opt.NetworkMode, "network-mode", "", Opt.NetworkMode, "Mount as remote network drive, instead of fixed disk drive. Supported on Windows only") flags.BoolVarP(flagSet, &Opt.NetworkMode, "network-mode", "", Opt.NetworkMode, "Mount as remote network drive, instead of fixed disk drive (supported on Windows only)")
// Unix only // Unix only
flags.DurationVarP(flagSet, &Opt.DaemonWait, "daemon-wait", "", Opt.DaemonWait, "Time to wait for ready mount from daemon (maximum time on Linux, constant sleep time on OSX/BSD). Ignored on Windows.") flags.DurationVarP(flagSet, &Opt.DaemonWait, "daemon-wait", "", Opt.DaemonWait, "Time to wait for ready mount from daemon (maximum time on Linux, constant sleep time on OSX/BSD) (not supported on Windows)")
} }
// NewMountCommand makes a mount command with the given name and Mount function // NewMountCommand makes a mount command with the given name and Mount function

View File

@ -57,22 +57,22 @@ Rclone's cloud storage systems as a file system with FUSE.
If no mountType is provided, the priority is given as follows: 1. mount 2.cmount 3.mount2 If no mountType is provided, the priority is given as follows: 1. mount 2.cmount 3.mount2
This takes the following parameters This takes the following parameters:
- fs - a remote path to be mounted (required) - fs - a remote path to be mounted (required)
- mountPoint: valid path on the local machine (required) - mountPoint: valid path on the local machine (required)
- mountType: One of the values (mount, cmount, mount2) specifies the mount implementation to use - mountType: one of the values (mount, cmount, mount2) specifies the mount implementation to use
- mountOpt: a JSON object with Mount options in. - mountOpt: a JSON object with Mount options in.
- vfsOpt: a JSON object with VFS options in. - vfsOpt: a JSON object with VFS options in.
Eg Example:
rclone rc mount/mount fs=mydrive: mountPoint=/home/<user>/mountPoint rclone rc mount/mount fs=mydrive: mountPoint=/home/<user>/mountPoint
rclone rc mount/mount fs=mydrive: mountPoint=/home/<user>/mountPoint mountType=mount rclone rc mount/mount fs=mydrive: mountPoint=/home/<user>/mountPoint mountType=mount
rclone rc mount/mount fs=TestDrive: mountPoint=/mnt/tmp vfsOpt='{"CacheMode": 2}' mountOpt='{"AllowOther": true}' rclone rc mount/mount fs=TestDrive: mountPoint=/mnt/tmp vfsOpt='{"CacheMode": 2}' mountOpt='{"AllowOther": true}'
The vfsOpt are as described in options/get and can be seen in the the The vfsOpt are as described in options/get and can be seen in the the
"vfs" section when running and the mountOpt can be seen in the "mount" section. "vfs" section when running and the mountOpt can be seen in the "mount" section:
rclone rc options/get rclone rc options/get
`, `,
@ -150,11 +150,11 @@ rclone allows Linux, FreeBSD, macOS and Windows to
mount any of Rclone's cloud storage systems as a file system with mount any of Rclone's cloud storage systems as a file system with
FUSE. FUSE.
This takes the following parameters This takes the following parameters:
- mountPoint: valid path on the local machine where the mount was created (required) - mountPoint: valid path on the local machine where the mount was created (required)
Eg Example:
rclone rc mount/unmount mountPoint=/home/<user>/mountPoint rclone rc mount/unmount mountPoint=/home/<user>/mountPoint
`, `,
@ -222,7 +222,7 @@ func init() {
AuthRequired: true, AuthRequired: true,
Fn: listMountsRc, Fn: listMountsRc,
Title: "Show current mount points", Title: "Show current mount points",
Help: `This shows currently mounted points, which can be used for performing an unmount Help: `This shows currently mounted points, which can be used for performing an unmount.
This takes no parameters and returns This takes no parameters and returns
@ -272,7 +272,7 @@ func init() {
AuthRequired: true, AuthRequired: true,
Fn: unmountAll, Fn: unmountAll,
Title: "Show current mount points", Title: "Show current mount points",
Help: `This shows currently mounted points, which can be used for performing an unmount Help: `This shows currently mounted points, which can be used for performing an unmount.
This takes no parameters and returns error if unmount does not succeed. This takes no parameters and returns error if unmount does not succeed.

View File

@ -35,14 +35,14 @@ var (
func init() { func init() {
cmd.Root.AddCommand(commandDefinition) cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags() cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &noOutput, "no-output", "", noOutput, "If set, don't output the JSON result.") flags.BoolVarP(cmdFlags, &noOutput, "no-output", "", noOutput, "If set, don't output the JSON result")
flags.StringVarP(cmdFlags, &url, "url", "", url, "URL to connect to rclone remote control.") flags.StringVarP(cmdFlags, &url, "url", "", url, "URL to connect to rclone remote control")
flags.StringVarP(cmdFlags, &jsonInput, "json", "", jsonInput, "Input JSON - use instead of key=value args.") flags.StringVarP(cmdFlags, &jsonInput, "json", "", jsonInput, "Input JSON - use instead of key=value args")
flags.StringVarP(cmdFlags, &authUser, "user", "", "", "Username to use to rclone remote control.") flags.StringVarP(cmdFlags, &authUser, "user", "", "", "Username to use to rclone remote control")
flags.StringVarP(cmdFlags, &authPass, "pass", "", "", "Password to use to connect to rclone remote control.") flags.StringVarP(cmdFlags, &authPass, "pass", "", "", "Password to use to connect to rclone remote control")
flags.BoolVarP(cmdFlags, &loopback, "loopback", "", false, "If set connect to this rclone instance not via HTTP.") flags.BoolVarP(cmdFlags, &loopback, "loopback", "", false, "If set connect to this rclone instance not via HTTP")
flags.StringArrayVarP(cmdFlags, &options, "opt", "o", options, "Option in the form name=value or name placed in the \"opt\" array.") flags.StringArrayVarP(cmdFlags, &options, "opt", "o", options, "Option in the form name=value or name placed in the \"opt\" array")
flags.StringArrayVarP(cmdFlags, &arguments, "arg", "a", arguments, "Argument placed in the \"arg\" array.") flags.StringArrayVarP(cmdFlags, &arguments, "arg", "a", arguments, "Argument placed in the \"arg\" array")
} }
var commandDefinition = &cobra.Command{ var commandDefinition = &cobra.Command{

View File

@ -51,10 +51,10 @@ var Opt = Options{}
func init() { func init() {
cmd.Root.AddCommand(cmdSelfUpdate) cmd.Root.AddCommand(cmdSelfUpdate)
cmdFlags := cmdSelfUpdate.Flags() cmdFlags := cmdSelfUpdate.Flags()
flags.BoolVarP(cmdFlags, &Opt.Check, "check", "", Opt.Check, "Check for latest release, do not download.") flags.BoolVarP(cmdFlags, &Opt.Check, "check", "", Opt.Check, "Check for latest release, do not download")
flags.StringVarP(cmdFlags, &Opt.Output, "output", "", Opt.Output, "Save the downloaded binary at a given path (default: replace running binary)") flags.StringVarP(cmdFlags, &Opt.Output, "output", "", Opt.Output, "Save the downloaded binary at a given path (default: replace running binary)")
flags.BoolVarP(cmdFlags, &Opt.Stable, "stable", "", Opt.Stable, "Install stable release (this is the default)") flags.BoolVarP(cmdFlags, &Opt.Stable, "stable", "", Opt.Stable, "Install stable release (this is the default)")
flags.BoolVarP(cmdFlags, &Opt.Beta, "beta", "", Opt.Beta, "Install beta release.") flags.BoolVarP(cmdFlags, &Opt.Beta, "beta", "", Opt.Beta, "Install beta release")
flags.StringVarP(cmdFlags, &Opt.Version, "version", "", Opt.Version, "Install the given rclone version (default: latest)") flags.StringVarP(cmdFlags, &Opt.Version, "version", "", Opt.Version, "Install the given rclone version (default: latest)")
flags.StringVarP(cmdFlags, &Opt.Package, "package", "", Opt.Package, "Package format: zip|deb|rpm (default: zip)") flags.StringVarP(cmdFlags, &Opt.Package, "package", "", Opt.Package, "Package format: zip|deb|rpm (default: zip)")
} }

View File

@ -42,9 +42,9 @@ var (
func addFlagsPrefix(flagSet *pflag.FlagSet, prefix string, Opt *Options) { func addFlagsPrefix(flagSet *pflag.FlagSet, prefix string, Opt *Options) {
rc.AddOption("dlna", &Opt) rc.AddOption("dlna", &Opt)
flags.StringVarP(flagSet, &Opt.ListenAddr, prefix+"addr", "", Opt.ListenAddr, "ip:port or :port to bind the DLNA http server to.") flags.StringVarP(flagSet, &Opt.ListenAddr, prefix+"addr", "", Opt.ListenAddr, "The ip:port or :port to bind the DLNA http server to")
flags.StringVarP(flagSet, &Opt.FriendlyName, prefix+"name", "", Opt.FriendlyName, "name of DLNA server") flags.StringVarP(flagSet, &Opt.FriendlyName, prefix+"name", "", Opt.FriendlyName, "Name of DLNA server")
flags.BoolVarP(flagSet, &Opt.LogTrace, prefix+"log-trace", "", Opt.LogTrace, "enable trace logging of SOAP traffic") flags.BoolVarP(flagSet, &Opt.LogTrace, prefix+"log-trace", "", Opt.LogTrace, "Enable trace logging of SOAP traffic")
} }
// AddFlags add the command line flags for DLNA serving. // AddFlags add the command line flags for DLNA serving.

View File

@ -33,11 +33,11 @@ var (
func init() { func init() {
cmdFlags := Command.Flags() cmdFlags := Command.Flags()
// Add command specific flags // Add command specific flags
flags.StringVarP(cmdFlags, &baseDir, "base-dir", "", baseDir, "base directory for volumes") flags.StringVarP(cmdFlags, &baseDir, "base-dir", "", baseDir, "Base directory for volumes")
flags.StringVarP(cmdFlags, &socketAddr, "socket-addr", "", socketAddr, "<host:port> or absolute path (default: /run/docker/plugins/rclone.sock)") flags.StringVarP(cmdFlags, &socketAddr, "socket-addr", "", socketAddr, "Address <host:port> or absolute path (default: /run/docker/plugins/rclone.sock)")
flags.IntVarP(cmdFlags, &socketGid, "socket-gid", "", socketGid, "GID for unix socket (default: current process GID)") flags.IntVarP(cmdFlags, &socketGid, "socket-gid", "", socketGid, "GID for unix socket (default: current process GID)")
flags.BoolVarP(cmdFlags, &forgetState, "forget-state", "", forgetState, "skip restoring previous state") flags.BoolVarP(cmdFlags, &forgetState, "forget-state", "", forgetState, "Skip restoring previous state")
flags.BoolVarP(cmdFlags, &noSpec, "no-spec", "", noSpec, "do not write spec file") flags.BoolVarP(cmdFlags, &noSpec, "no-spec", "", noSpec, "Do not write spec file")
// Add common mount/vfs flags // Add common mount/vfs flags
mountlib.AddFlags(cmdFlags) mountlib.AddFlags(cmdFlags)
vfsflags.AddFlags(cmdFlags) vfsflags.AddFlags(cmdFlags)

View File

@ -59,11 +59,11 @@ var Opt = DefaultOpt
// AddFlags adds flags for ftp // AddFlags adds flags for ftp
func AddFlags(flagSet *pflag.FlagSet) { func AddFlags(flagSet *pflag.FlagSet) {
rc.AddOption("ftp", &Opt) rc.AddOption("ftp", &Opt)
flags.StringVarP(flagSet, &Opt.ListenAddr, "addr", "", Opt.ListenAddr, "IPaddress:Port or :Port to bind server to.") flags.StringVarP(flagSet, &Opt.ListenAddr, "addr", "", Opt.ListenAddr, "IPaddress:Port or :Port to bind server to")
flags.StringVarP(flagSet, &Opt.PublicIP, "public-ip", "", Opt.PublicIP, "Public IP address to advertise for passive connections.") flags.StringVarP(flagSet, &Opt.PublicIP, "public-ip", "", Opt.PublicIP, "Public IP address to advertise for passive connections")
flags.StringVarP(flagSet, &Opt.PassivePorts, "passive-port", "", Opt.PassivePorts, "Passive port range to use.") flags.StringVarP(flagSet, &Opt.PassivePorts, "passive-port", "", Opt.PassivePorts, "Passive port range to use")
flags.StringVarP(flagSet, &Opt.BasicUser, "user", "", Opt.BasicUser, "User name for authentication.") flags.StringVarP(flagSet, &Opt.BasicUser, "user", "", Opt.BasicUser, "User name for authentication")
flags.StringVarP(flagSet, &Opt.BasicPass, "pass", "", Opt.BasicPass, "Password for authentication. (empty value allow every password)") flags.StringVarP(flagSet, &Opt.BasicPass, "pass", "", Opt.BasicPass, "Password for authentication (empty value allow every password)")
flags.StringVarP(flagSet, &Opt.TLSCert, "cert", "", Opt.TLSCert, "TLS PEM key (concatenation of certificate and CA certificate)") flags.StringVarP(flagSet, &Opt.TLSCert, "cert", "", Opt.TLSCert, "TLS PEM key (concatenation of certificate and CA certificate)")
flags.StringVarP(flagSet, &Opt.TLSKey, "key", "", Opt.TLSKey, "TLS PEM Private key") flags.StringVarP(flagSet, &Opt.TLSKey, "key", "", Opt.TLSKey, "TLS PEM Private key")
} }

View File

@ -47,7 +47,7 @@ type Options struct {
// AddFlags for the templating functionality // AddFlags for the templating functionality
func AddFlags(flagSet *pflag.FlagSet, prefix string, Opt *Options) { func AddFlags(flagSet *pflag.FlagSet, prefix string, Opt *Options) {
flags.StringVarP(flagSet, &Opt.Template, prefix+"template", "", Opt.Template, "User Specified Template.") flags.StringVarP(flagSet, &Opt.Template, prefix+"template", "", Opt.Template, "User-specified template")
} }
// AfterEpoch returns the time since the epoch for the given time // AfterEpoch returns the time since the epoch for the given time

View File

@ -15,7 +15,7 @@ var (
// AddFlagsPrefix adds flags for the httplib // AddFlagsPrefix adds flags for the httplib
func AddFlagsPrefix(flagSet *pflag.FlagSet, prefix string, Opt *httplib.Options) { func AddFlagsPrefix(flagSet *pflag.FlagSet, prefix string, Opt *httplib.Options) {
rc.AddOption(prefix+"http", &Opt) rc.AddOption(prefix+"http", &Opt)
flags.StringVarP(flagSet, &Opt.ListenAddr, prefix+"addr", "", Opt.ListenAddr, "IPaddress:Port or :Port to bind server to.") flags.StringVarP(flagSet, &Opt.ListenAddr, prefix+"addr", "", Opt.ListenAddr, "IPaddress:Port or :Port to bind server to")
flags.DurationVarP(flagSet, &Opt.ServerReadTimeout, prefix+"server-read-timeout", "", Opt.ServerReadTimeout, "Timeout for server reading data") flags.DurationVarP(flagSet, &Opt.ServerReadTimeout, prefix+"server-read-timeout", "", Opt.ServerReadTimeout, "Timeout for server reading data")
flags.DurationVarP(flagSet, &Opt.ServerWriteTimeout, prefix+"server-write-timeout", "", Opt.ServerWriteTimeout, "Timeout for server writing data") flags.DurationVarP(flagSet, &Opt.ServerWriteTimeout, prefix+"server-write-timeout", "", Opt.ServerWriteTimeout, "Timeout for server writing data")
flags.IntVarP(flagSet, &Opt.MaxHeaderBytes, prefix+"max-header-bytes", "", Opt.MaxHeaderBytes, "Maximum size of request header") flags.IntVarP(flagSet, &Opt.MaxHeaderBytes, prefix+"max-header-bytes", "", Opt.MaxHeaderBytes, "Maximum size of request header")
@ -24,10 +24,10 @@ func AddFlagsPrefix(flagSet *pflag.FlagSet, prefix string, Opt *httplib.Options)
flags.StringVarP(flagSet, &Opt.ClientCA, prefix+"client-ca", "", Opt.ClientCA, "Client certificate authority to verify clients with") flags.StringVarP(flagSet, &Opt.ClientCA, prefix+"client-ca", "", Opt.ClientCA, "Client certificate authority to verify clients with")
flags.StringVarP(flagSet, &Opt.HtPasswd, prefix+"htpasswd", "", Opt.HtPasswd, "htpasswd file - if not provided no authentication is done") flags.StringVarP(flagSet, &Opt.HtPasswd, prefix+"htpasswd", "", Opt.HtPasswd, "htpasswd file - if not provided no authentication is done")
flags.StringVarP(flagSet, &Opt.Realm, prefix+"realm", "", Opt.Realm, "realm for authentication") flags.StringVarP(flagSet, &Opt.Realm, prefix+"realm", "", Opt.Realm, "realm for authentication")
flags.StringVarP(flagSet, &Opt.BasicUser, prefix+"user", "", Opt.BasicUser, "User name for authentication.") flags.StringVarP(flagSet, &Opt.BasicUser, prefix+"user", "", Opt.BasicUser, "User name for authentication")
flags.StringVarP(flagSet, &Opt.BasicPass, prefix+"pass", "", Opt.BasicPass, "Password for authentication.") flags.StringVarP(flagSet, &Opt.BasicPass, prefix+"pass", "", Opt.BasicPass, "Password for authentication")
flags.StringVarP(flagSet, &Opt.BaseURL, prefix+"baseurl", "", Opt.BaseURL, "Prefix for URLs - leave blank for root.") flags.StringVarP(flagSet, &Opt.BaseURL, prefix+"baseurl", "", Opt.BaseURL, "Prefix for URLs - leave blank for root")
flags.StringVarP(flagSet, &Opt.Template, prefix+"template", "", Opt.Template, "User Specified Template.") flags.StringVarP(flagSet, &Opt.Template, prefix+"template", "", Opt.Template, "User-specified template")
} }

View File

@ -14,5 +14,5 @@ var (
// AddFlags adds the non filing system specific flags to the command // AddFlags adds the non filing system specific flags to the command
func AddFlags(flagSet *pflag.FlagSet) { func AddFlags(flagSet *pflag.FlagSet) {
flags.StringVarP(flagSet, &Opt.AuthProxy, "auth-proxy", "", Opt.AuthProxy, "A program to use to create the backend from the auth.") flags.StringVarP(flagSet, &Opt.AuthProxy, "auth-proxy", "", Opt.AuthProxy, "A program to use to create the backend from the auth")
} }

View File

@ -37,10 +37,10 @@ var (
func init() { func init() {
httpflags.AddFlags(Command.Flags()) httpflags.AddFlags(Command.Flags())
flagSet := Command.Flags() flagSet := Command.Flags()
flags.BoolVarP(flagSet, &stdio, "stdio", "", false, "run an HTTP2 server on stdin/stdout") flags.BoolVarP(flagSet, &stdio, "stdio", "", false, "Run an HTTP2 server on stdin/stdout")
flags.BoolVarP(flagSet, &appendOnly, "append-only", "", false, "disallow deletion of repository data") flags.BoolVarP(flagSet, &appendOnly, "append-only", "", false, "Disallow deletion of repository data")
flags.BoolVarP(flagSet, &privateRepos, "private-repos", "", false, "users can only access their private repo") flags.BoolVarP(flagSet, &privateRepos, "private-repos", "", false, "Users can only access their private repo")
flags.BoolVarP(flagSet, &cacheObjects, "cache-objects", "", true, "cache listed objects") flags.BoolVarP(flagSet, &cacheObjects, "cache-objects", "", true, "Cache listed objects")
} }
// Command definition for cobra // Command definition for cobra

View File

@ -43,12 +43,12 @@ var Opt = DefaultOpt
// AddFlags adds flags for the sftp // AddFlags adds flags for the sftp
func AddFlags(flagSet *pflag.FlagSet, Opt *Options) { func AddFlags(flagSet *pflag.FlagSet, Opt *Options) {
rc.AddOption("sftp", &Opt) rc.AddOption("sftp", &Opt)
flags.StringVarP(flagSet, &Opt.ListenAddr, "addr", "", Opt.ListenAddr, "IPaddress:Port or :Port to bind server to.") flags.StringVarP(flagSet, &Opt.ListenAddr, "addr", "", Opt.ListenAddr, "IPaddress:Port or :Port to bind server to")
flags.StringArrayVarP(flagSet, &Opt.HostKeys, "key", "", Opt.HostKeys, "SSH private host key file (Can be multi-valued, leave blank to auto generate)") flags.StringArrayVarP(flagSet, &Opt.HostKeys, "key", "", Opt.HostKeys, "SSH private host key file (Can be multi-valued, leave blank to auto generate)")
flags.StringVarP(flagSet, &Opt.AuthorizedKeys, "authorized-keys", "", Opt.AuthorizedKeys, "Authorized keys file") flags.StringVarP(flagSet, &Opt.AuthorizedKeys, "authorized-keys", "", Opt.AuthorizedKeys, "Authorized keys file")
flags.StringVarP(flagSet, &Opt.User, "user", "", Opt.User, "User name for authentication.") flags.StringVarP(flagSet, &Opt.User, "user", "", Opt.User, "User name for authentication")
flags.StringVarP(flagSet, &Opt.Pass, "pass", "", Opt.Pass, "Password for authentication.") flags.StringVarP(flagSet, &Opt.Pass, "pass", "", Opt.Pass, "Password for authentication")
flags.BoolVarP(flagSet, &Opt.NoAuth, "no-auth", "", Opt.NoAuth, "Allow connections with no authentication if set.") flags.BoolVarP(flagSet, &Opt.NoAuth, "no-auth", "", Opt.NoAuth, "Allow connections with no authentication if set")
flags.BoolVarP(flagSet, &Opt.Stdio, "stdio", "", Opt.Stdio, "Run an sftp server on run stdin/stdout") flags.BoolVarP(flagSet, &Opt.Stdio, "stdio", "", Opt.Stdio, "Run an sftp server on run stdin/stdout")
} }

View File

@ -18,7 +18,7 @@ var jsonOutput bool
func init() { func init() {
cmd.Root.AddCommand(commandDefinition) cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags() cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &jsonOutput, "json", "", false, "format output as JSON") flags.BoolVarP(cmdFlags, &jsonOutput, "json", "", false, "Format output as JSON")
} }
var commandDefinition = &cobra.Command{ var commandDefinition = &cobra.Command{

View File

@ -20,7 +20,7 @@ var (
func init() { func init() {
test.Command.AddCommand(commandDefinition) test.Command.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags() cmdFlags := commandDefinition.Flags()
flags.DurationVarP(cmdFlags, &pollInterval, "poll-interval", "", pollInterval, "Time to wait between polling for changes.") flags.DurationVarP(cmdFlags, &pollInterval, "poll-interval", "", pollInterval, "Time to wait between polling for changes")
} }
var commandDefinition = &cobra.Command{ var commandDefinition = &cobra.Command{

View File

@ -47,13 +47,13 @@ var (
func init() { func init() {
test.Command.AddCommand(commandDefinition) test.Command.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags() cmdFlags := commandDefinition.Flags()
flags.StringVarP(cmdFlags, &writeJSON, "write-json", "", "", "Write results to file.") flags.StringVarP(cmdFlags, &writeJSON, "write-json", "", "", "Write results to file")
flags.BoolVarP(cmdFlags, &checkNormalization, "check-normalization", "", false, "Check UTF-8 Normalization.") flags.BoolVarP(cmdFlags, &checkNormalization, "check-normalization", "", false, "Check UTF-8 Normalization")
flags.BoolVarP(cmdFlags, &checkControl, "check-control", "", false, "Check control characters.") flags.BoolVarP(cmdFlags, &checkControl, "check-control", "", false, "Check control characters")
flags.DurationVarP(cmdFlags, &uploadWait, "upload-wait", "", 0, "Wait after writing a file.") flags.DurationVarP(cmdFlags, &uploadWait, "upload-wait", "", 0, "Wait after writing a file")
flags.BoolVarP(cmdFlags, &checkLength, "check-length", "", false, "Check max filename length.") flags.BoolVarP(cmdFlags, &checkLength, "check-length", "", false, "Check max filename length")
flags.BoolVarP(cmdFlags, &checkStreaming, "check-streaming", "", false, "Check uploads with indeterminate file size.") flags.BoolVarP(cmdFlags, &checkStreaming, "check-streaming", "", false, "Check uploads with indeterminate file size")
flags.BoolVarP(cmdFlags, &all, "all", "", false, "Run all tests.") flags.BoolVarP(cmdFlags, &all, "all", "", false, "Run all tests")
} }
var commandDefinition = &cobra.Command{ var commandDefinition = &cobra.Command{

View File

@ -30,10 +30,10 @@ const (
func init() { func init() {
cmd.Root.AddCommand(commandDefinition) cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags() cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &notCreateNewFile, "no-create", "C", false, "Do not create the file if it does not exist. Implied with --recursive.") flags.BoolVarP(cmdFlags, &notCreateNewFile, "no-create", "C", false, "Do not create the file if it does not exist (implied with --recursive)")
flags.StringVarP(cmdFlags, &timeAsArgument, "timestamp", "t", "", "Use specified time instead of the current time of day.") flags.StringVarP(cmdFlags, &timeAsArgument, "timestamp", "t", "", "Use specified time instead of the current time of day")
flags.BoolVarP(cmdFlags, &localTime, "localtime", "", false, "Use localtime for timestamp, not UTC.") flags.BoolVarP(cmdFlags, &localTime, "localtime", "", false, "Use localtime for timestamp, not UTC")
flags.BoolVarP(cmdFlags, &recursive, "recursive", "R", false, "Recursively touch all files.") flags.BoolVarP(cmdFlags, &recursive, "recursive", "R", false, "Recursively touch all files")
} }
var commandDefinition = &cobra.Command{ var commandDefinition = &cobra.Command{

View File

@ -32,18 +32,19 @@ func init() {
cmd.Root.AddCommand(commandDefinition) cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags() cmdFlags := commandDefinition.Flags()
// List // List
flags.BoolVarP(cmdFlags, &opts.All, "all", "a", false, "All files are listed (list . files too).") flags.BoolVarP(cmdFlags, &opts.All, "all", "a", false, "All files are listed (list . files too)")
flags.BoolVarP(cmdFlags, &opts.DirsOnly, "dirs-only", "d", false, "List directories only.") flags.BoolVarP(cmdFlags, &opts.DirsOnly, "dirs-only", "d", false, "List directories only")
flags.BoolVarP(cmdFlags, &opts.FullPath, "full-path", "", false, "Print the full path prefix for each file.") flags.BoolVarP(cmdFlags, &opts.FullPath, "full-path", "", false, "Print the full path prefix for each file")
//flags.BoolVarP(cmdFlags, &opts.IgnoreCase, "ignore-case", "", false, "Ignore case when pattern matching.") //flags.BoolVarP(cmdFlags, &opts.IgnoreCase, "ignore-case", "", false, "Ignore case when pattern matching")
flags.BoolVarP(cmdFlags, &noReport, "noreport", "", false, "Turn off file/directory count at end of tree listing.") flags.BoolVarP(cmdFlags, &noReport, "noreport", "", false, "Turn off file/directory count at end of tree listing")
// flags.BoolVarP(cmdFlags, &opts.FollowLink, "follow", "l", false, "Follow symbolic links like directories.") // flags.BoolVarP(cmdFlags, &opts.FollowLink, "follow", "l", false, "Follow symbolic links like directories")
flags.IntVarP(cmdFlags, &opts.DeepLevel, "level", "", 0, "Descend only level directories deep.") flags.IntVarP(cmdFlags, &opts.DeepLevel, "level", "", 0, "Descend only level directories deep")
// flags.StringVarP(cmdFlags, &opts.Pattern, "pattern", "P", "", "List only those files that match the pattern given.") // flags.StringVarP(cmdFlags, &opts.Pattern, "pattern", "P", "", "List only those files that match the pattern given")
// flags.StringVarP(cmdFlags, &opts.IPattern, "exclude", "", "", "Do not list files that match the given pattern.") // flags.StringVarP(cmdFlags, &opts.IPattern, "exclude", "", "", "Do not list files that match the given pattern")
flags.StringVarP(cmdFlags, &outFileName, "output", "o", "", "Output to file instead of stdout.") flags.StringVarP(cmdFlags, &outFileName, "output", "o", "", "Output to file instead of stdout")
// Files // Files
flags.BoolVarP(cmdFlags, &opts.ByteSize, "size", "s", false, "Print the size in bytes of each file.") flags.BoolVarP(cmdFlags, &opts.ByteSize, "size", "s", false, "Print the size in bytes of each file.")
flags.BoolVarP(cmdFlags, &opts.UnitSize, "human", "", false, "Print the size in a more human readable way.")
flags.BoolVarP(cmdFlags, &opts.FileMode, "protections", "p", false, "Print the protections for each file.") flags.BoolVarP(cmdFlags, &opts.FileMode, "protections", "p", false, "Print the protections for each file.")
// flags.BoolVarP(cmdFlags, &opts.ShowUid, "uid", "", false, "Displays file owner or UID number.") // flags.BoolVarP(cmdFlags, &opts.ShowUid, "uid", "", false, "Displays file owner or UID number.")
// flags.BoolVarP(cmdFlags, &opts.ShowGid, "gid", "", false, "Displays file group owner or GID number.") // flags.BoolVarP(cmdFlags, &opts.ShowGid, "gid", "", false, "Displays file group owner or GID number.")
@ -52,16 +53,16 @@ func init() {
// flags.BoolVarP(cmdFlags, &opts.Inodes, "inodes", "", false, "Print inode number of each file.") // flags.BoolVarP(cmdFlags, &opts.Inodes, "inodes", "", false, "Print inode number of each file.")
// flags.BoolVarP(cmdFlags, &opts.Device, "device", "", false, "Print device ID number to which each file belongs.") // flags.BoolVarP(cmdFlags, &opts.Device, "device", "", false, "Print device ID number to which each file belongs.")
// Sort // Sort
flags.BoolVarP(cmdFlags, &opts.NoSort, "unsorted", "U", false, "Leave files unsorted.") flags.BoolVarP(cmdFlags, &opts.NoSort, "unsorted", "U", false, "Leave files unsorted")
flags.BoolVarP(cmdFlags, &opts.VerSort, "version", "", false, "Sort files alphanumerically by version.") flags.BoolVarP(cmdFlags, &opts.VerSort, "version", "", false, "Sort files alphanumerically by version")
flags.BoolVarP(cmdFlags, &opts.ModSort, "sort-modtime", "t", false, "Sort files by last modification time.") flags.BoolVarP(cmdFlags, &opts.ModSort, "sort-modtime", "t", false, "Sort files by last modification time")
flags.BoolVarP(cmdFlags, &opts.CTimeSort, "sort-ctime", "", false, "Sort files by last status change time.") flags.BoolVarP(cmdFlags, &opts.CTimeSort, "sort-ctime", "", false, "Sort files by last status change time")
flags.BoolVarP(cmdFlags, &opts.ReverSort, "sort-reverse", "r", false, "Reverse the order of the sort.") flags.BoolVarP(cmdFlags, &opts.ReverSort, "sort-reverse", "r", false, "Reverse the order of the sort")
flags.BoolVarP(cmdFlags, &opts.DirSort, "dirsfirst", "", false, "List directories before files (-U disables).") flags.BoolVarP(cmdFlags, &opts.DirSort, "dirsfirst", "", false, "List directories before files (-U disables)")
flags.StringVarP(cmdFlags, &sort, "sort", "", "", "Select sort: name,version,size,mtime,ctime.") flags.StringVarP(cmdFlags, &sort, "sort", "", "", "Select sort: name,version,size,mtime,ctime")
// Graphics // Graphics
flags.BoolVarP(cmdFlags, &opts.NoIndent, "noindent", "", false, "Don't print indentation lines.") flags.BoolVarP(cmdFlags, &opts.NoIndent, "noindent", "", false, "Don't print indentation lines")
flags.BoolVarP(cmdFlags, &opts.Colorize, "color", "C", false, "Turn colorization on always.") flags.BoolVarP(cmdFlags, &opts.Colorize, "color", "C", false, "Turn colorization on always")
} }
var commandDefinition = &cobra.Command{ var commandDefinition = &cobra.Command{

View File

@ -22,7 +22,7 @@ var (
func init() { func init() {
cmd.Root.AddCommand(commandDefinition) cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags() cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &check, "check", "", false, "Check for new version.") flags.BoolVarP(cmdFlags, &check, "check", "", false, "Check for new version")
} }
var commandDefinition = &cobra.Command{ var commandDefinition = &cobra.Command{

View File

@ -13,35 +13,35 @@ split into two groups, non backend and backend flags.
These flags are available for every command. These flags are available for every command.
``` ```
--ask-password Allow prompt for password for encrypted configuration. (default true) --ask-password Allow prompt for password for encrypted configuration (default true)
--auto-confirm If enabled, do not request console confirmation. --auto-confirm If enabled, do not request console confirmation
--backup-dir string Make backups into hierarchy based in DIR. --backup-dir string Make backups into hierarchy based in DIR
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name. --bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16Mi) --buffer-size SizeSuffix In memory buffer size when reading files for each --transfer (default 16Mi)
--bwlimit BwTimetable Bandwidth limit in KiB/s, or use suffix B|K|M|G|T|P or a full timetable. --bwlimit BwTimetable Bandwidth limit in KiB/s, or use suffix B|K|M|G|T|P or a full timetable
--bwlimit-file BwTimetable Bandwidth limit per file in KiB/s, or use suffix B|K|M|G|T|P or a full timetable. --bwlimit-file BwTimetable Bandwidth limit per file in KiB/s, or use suffix B|K|M|G|T|P or a full timetable
--ca-cert string CA certificate used to verify servers --ca-cert string CA certificate used to verify servers
--temp-dir string Directory rclone will use for temporary files. (default "$TMPDIR") --temp-dir string Directory rclone will use for temporary files (default "$TMPDIR")
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone") --cache-dir string Directory rclone will use for caching (default "$HOME/.cache/rclone")
--check-first Do all the checks before starting transfers. --check-first Do all the checks before starting transfers
--checkers int Number of checkers to run in parallel. (default 8) --checkers int Number of checkers to run in parallel (default 8)
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size -c, --checksum Skip based on checksum (if available) & size, not mod-time & size
--client-cert string Client SSL certificate (PEM) for mutual TLS auth --client-cert string Client SSL certificate (PEM) for mutual TLS auth
--client-key string Client SSL private key (PEM) for mutual TLS auth --client-key string Client SSL private key (PEM) for mutual TLS auth
--compare-dest stringArray Include additional comma separated server-side paths during comparison. --compare-dest stringArray Include additional comma separated server-side paths during comparison
--config string Config file. (default "$HOME/.config/rclone/rclone.conf") --config string Config file (default "$HOME/.config/rclone/rclone.conf")
--contimeout duration Connect timeout (default 1m0s) --contimeout duration Connect timeout (default 1m0s)
--copy-dest stringArray Implies --compare-dest but also copies files from paths into destination. --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination
--cpuprofile string Write cpu profile to file --cpuprofile string Write cpu profile to file
--cutoff-mode string Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default "HARD") --cutoff-mode string Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default "HARD")
--delete-after When synchronizing, delete files on destination after transferring (default) --delete-after When synchronizing, delete files on destination after transferring (default)
--delete-before When synchronizing, delete files on destination before transferring --delete-before When synchronizing, delete files on destination before transferring
--delete-during When synchronizing, delete files during transfer --delete-during When synchronizing, delete files during transfer
--delete-excluded Delete files on dest excluded from sync --delete-excluded Delete files on dest excluded from sync
--disable string Disable a comma separated list of features. Use --disable help to see a list. --disable string Disable a comma separated list of features (use --disable help to see a list)
--disable-http2 Disable HTTP/2 in the global transport. --disable-http2 Disable HTTP/2 in the global transport
-n, --dry-run Do a trial run with no permanent changes -n, --dry-run Do a trial run with no permanent changes
--dscp string Set DSCP value to connections. Can be value or names, eg. CS1, LE, DF, AF21. --dscp string Set DSCP value to connections, value or name, e.g. CS1, LE, DF, AF21
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles --dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info --dump-bodies Dump HTTP headers and bodies - may contain sensitive info
--dump-headers Dump HTTP headers - may contain sensitive info --dump-headers Dump HTTP headers - may contain sensitive info
@ -50,7 +50,7 @@ These flags are available for every command.
--exclude-from stringArray Read exclude patterns from file (use - to read from stdin) --exclude-from stringArray Read exclude patterns from file (use - to read from stdin)
--exclude-if-present string Exclude directories if filename is present --exclude-if-present string Exclude directories if filename is present
--expect-continue-timeout duration Timeout when using expect / 100-continue in HTTP (default 1s) --expect-continue-timeout duration Timeout when using expect / 100-continue in HTTP (default 1s)
--fast-list Use recursive list if available. Uses more memory but fewer transactions. --fast-list Use recursive list if available; Uses more memory but fewer transactions
--files-from stringArray Read list of source-file names from file (use - to read from stdin) --files-from stringArray Read list of source-file names from file (use - to read from stdin)
--files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin) --files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
-f, --filter stringArray Add a file-filtering rule -f, --filter stringArray Add a file-filtering rule
@ -62,12 +62,12 @@ These flags are available for every command.
--header-upload stringArray Set HTTP header for upload transactions --header-upload stringArray Set HTTP header for upload transactions
--ignore-case Ignore case in filters (case insensitive) --ignore-case Ignore case in filters (case insensitive)
--ignore-case-sync Ignore case when synchronizing --ignore-case-sync Ignore case when synchronizing
--ignore-checksum Skip post copy check of checksums. --ignore-checksum Skip post copy check of checksums
--ignore-errors delete even if there are I/O errors --ignore-errors delete even if there are I/O errors
--ignore-existing Skip all files that exist on destination --ignore-existing Skip all files that exist on destination
--ignore-size Ignore size when skipping use mod-time or checksum. --ignore-size Ignore size when skipping use mod-time or checksum
-I, --ignore-times Don't skip files that match size and time - transfer all files -I, --ignore-times Don't skip files that match size and time - transfer all files
--immutable Do not modify files. Fail if existing files have been modified. --immutable Do not modify files, fail if existing files have been modified
--include stringArray Include files matching pattern --include stringArray Include files matching pattern
--include-from stringArray Read include patterns from file (use - to read from stdin) --include-from stringArray Read include patterns from file (use - to read from stdin)
-i, --interactive Enable interactive mode -i, --interactive Enable interactive mode
@ -75,88 +75,88 @@ These flags are available for every command.
--log-file string Log everything to this file --log-file string Log everything to this file
--log-format string Comma separated list of log format options (default "date,time") --log-format string Comma separated list of log format options (default "date,time")
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE") --log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
--log-systemd Activate systemd integration for the logger. --log-systemd Activate systemd integration for the logger
--low-level-retries int Number of low level retries to do. (default 10) --low-level-retries int Number of low level retries to do (default 10)
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off) --max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000) --max-backlog int Maximum number of objects in sync or check backlog (default 10000)
--max-delete int When synchronizing, limit the number of deletes (default -1) --max-delete int When synchronizing, limit the number of deletes (default -1)
--max-depth int If set limits the recursion depth to this. (default -1) --max-depth int If set limits the recursion depth to this (default -1)
--max-duration duration Maximum duration rclone will transfer data for. --max-duration duration Maximum duration rclone will transfer data for
--max-size SizeSuffix Only transfer files smaller than this in KiB or suffix B|K|M|G|T|P (default off) --max-size SizeSuffix Only transfer files smaller than this in KiB or suffix B|K|M|G|T|P (default off)
--max-stats-groups int Maximum number of stats groups to keep in memory. On max oldest is discarded. (default 1000) --max-stats-groups int Maximum number of stats groups to keep in memory, on max oldest is discarded (default 1000)
--max-transfer SizeSuffix Maximum size of data to transfer. (default off) --max-transfer SizeSuffix Maximum size of data to transfer (default off)
--memprofile string Write memory profile to file --memprofile string Write memory profile to file
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off) --min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
--min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
--modify-window duration Max time diff to be considered the same (default 1ns) --modify-window duration Max time diff to be considered the same (default 1ns)
--multi-thread-cutoff SizeSuffix Use multi-thread downloads for files above this size. (default 250Mi) --multi-thread-cutoff SizeSuffix Use multi-thread downloads for files above this size (default 250Mi)
--multi-thread-streams int Max number of streams to use for multi-thread downloads. (default 4) --multi-thread-streams int Max number of streams to use for multi-thread downloads (default 4)
--no-check-certificate Do not verify the server SSL certificate. Insecure. --no-check-certificate Do not verify the server SSL certificate (insecure)
--no-check-dest Don't check the destination, copy regardless. --no-check-dest Don't check the destination, copy regardless
--no-console Hide console window. Supported on Windows only. --no-console Hide console window (supported on Windows only)
--no-gzip-encoding Don't set Accept-Encoding: gzip. --no-gzip-encoding Don't set Accept-Encoding: gzip
--no-traverse Don't traverse destination file system on copy. --no-traverse Don't traverse destination file system on copy
--no-unicode-normalization Don't normalize unicode characters in filenames. --no-unicode-normalization Don't normalize unicode characters in filenames
--no-update-modtime Don't update destination mod-time if files identical. --no-update-modtime Don't update destination mod-time if files identical
--order-by string Instructions on how to order the transfers, e.g. 'size,descending' --order-by string Instructions on how to order the transfers, e.g. 'size,descending'
--password-command SpaceSepList Command for supplying password for encrypted configuration. --password-command SpaceSepList Command for supplying password for encrypted configuration
-P, --progress Show progress during transfer. -P, --progress Show progress during transfer
--progress-terminal-title Show progress on the terminal title. Requires -P/--progress. --progress-terminal-title Show progress on the terminal title (requires -P/--progress)
-q, --quiet Print as little stuff as possible -q, --quiet Print as little stuff as possible
--rc Enable the remote control server. --rc Enable the remote control server
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572") --rc-addr string IPaddress:Port or :Port to bind server to (default "localhost:5572")
--rc-allow-origin string Set the allowed origin for CORS. --rc-allow-origin string Set the allowed origin for CORS
--rc-baseurl string Prefix for URLs - leave blank for root. --rc-baseurl string Prefix for URLs - leave blank for root
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate) --rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
--rc-client-ca string Client certificate authority to verify clients with --rc-client-ca string Client certificate authority to verify clients with
--rc-enable-metrics Enable prometheus metrics on /metrics --rc-enable-metrics Enable prometheus metrics on /metrics
--rc-files string Path to local files to serve on the HTTP server. --rc-files string Path to local files to serve on the HTTP server
--rc-htpasswd string htpasswd file - if not provided no authentication is done --rc-htpasswd string htpasswd file - if not provided no authentication is done
--rc-job-expire-duration duration expire finished async jobs older than this value (default 1m0s) --rc-job-expire-duration duration expire finished async jobs older than this value (default 1m0s)
--rc-job-expire-interval duration interval to check for expired async jobs (default 10s) --rc-job-expire-interval duration interval to check for expired async jobs (default 10s)
--rc-key string SSL PEM Private key --rc-key string SSL PEM Private key
--rc-max-header-bytes int Maximum size of request header (default 4096) --rc-max-header-bytes int Maximum size of request header (default 4096)
--rc-no-auth Don't require auth for certain methods. --rc-no-auth Don't require auth for certain methods
--rc-pass string Password for authentication. --rc-pass string Password for authentication
--rc-realm string realm for authentication (default "rclone") --rc-realm string realm for authentication (default "rclone")
--rc-serve Enable the serving of remote objects. --rc-serve Enable the serving of remote objects
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s) --rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s) --rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
--rc-template string User Specified Template. --rc-template string User Specified Template
--rc-user string User name for authentication. --rc-user string User name for authentication
--rc-web-fetch-url string URL to fetch the releases for webgui. (default "https://api.github.com/repos/rclone/rclone-webui-react/releases/latest") --rc-web-fetch-url string URL to fetch the releases for webgui (default "https://api.github.com/repos/rclone/rclone-webui-react/releases/latest")
--rc-web-gui Launch WebGUI on localhost --rc-web-gui Launch WebGUI on localhost
--rc-web-gui-force-update Force update to latest version of web gui --rc-web-gui-force-update Force update to latest version of web gui
--rc-web-gui-no-open-browser Don't open the browser automatically --rc-web-gui-no-open-browser Don't open the browser automatically
--rc-web-gui-update Check and update to latest version of web gui --rc-web-gui-update Check and update to latest version of web gui
--refresh-times Refresh the modtime of remote files. --refresh-times Refresh the modtime of remote files
--retries int Retry operations this many times if they fail (default 3) --retries int Retry operations this many times if they fail (default 3)
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable) --retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5. (0 to disable)
--size-only Skip based on size only, not mod-time or checksum --size-only Skip based on size only, not mod-time or checksum
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s) --stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45) --stats-file-name-length int Max file name length in stats, 0 for no limit (default 45)
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO") --stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
--stats-one-line Make the stats fit on one line. --stats-one-line Make the stats fit on one line
--stats-one-line-date Enables --stats-one-line and add current date/time prefix. --stats-one-line-date Enable --stats-one-line and add current date/time prefix
--stats-one-line-date-format string Enables --stats-one-line-date and uses custom formatted date. Enclose date string in double quotes ("). See https://golang.org/pkg/time/#Time.Format --stats-one-line-date-format string Enable --stats-one-line-date and use custom formatted date: Enclose date string in double quotes ("), see https://golang.org/pkg/time/#Time.Format
--stats-unit string Show data rate in stats as either 'bits' or 'bytes' per second (default "bytes") --stats-unit string Show data rate in stats as either 'bits' or 'bytes' per second (default "bytes")
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100Ki) --streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown, upload starts after reaching cutoff or when file ends (default 100Ki)
--suffix string Suffix to add to changed files. --suffix string Suffix to add to changed files
--suffix-keep-extension Preserve the extension when using --suffix. --suffix-keep-extension Preserve the extension when using --suffix
--syslog Use Syslog for logging --syslog Use Syslog for logging
--syslog-facility string Facility for syslog, e.g. KERN,USER,... (default "DAEMON") --syslog-facility string Facility for syslog, e.g. KERN,USER,... (default "DAEMON")
--timeout duration IO idle timeout (default 5m0s) --timeout duration IO idle timeout (default 5m0s)
--tpslimit float Limit HTTP transactions per second to this. --tpslimit float Limit HTTP transactions per second to this
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1) --tpslimit-burst int Max burst of transactions for --tpslimit (default 1)
--track-renames When synchronizing, track file renames and do a server-side move if possible --track-renames When synchronizing, track file renames and do a server-side move if possible
--track-renames-strategy string Strategies to use when synchronizing using track-renames hash|modtime|leaf (default "hash") --track-renames-strategy string Strategies to use when synchronizing using track-renames hash|modtime|leaf (default "hash")
--transfers int Number of file transfers to run in parallel. (default 4) --transfers int Number of file transfers to run in parallel (default 4)
-u, --update Skip files that are newer on the destination. -u, --update Skip files that are newer on the destination
--use-cookies Enable session cookiejar. --use-cookies Enable session cookiejar
--use-json-log Use json log format. --use-json-log Use json log format
--use-mmap Use mmap allocator (see docs). --use-mmap Use mmap allocator (see docs)
--use-server-modtime Use server modified time instead of object metadata --use-server-modtime Use server modified time instead of object metadata
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.56.0") --user-agent string Set the user-agent to a specified string (default "rclone/v1.56.0")
-v, --verbose count Print lots more stuff (repeat for more) -v, --verbose count Print lots more stuff (repeat for more)
``` ```
@ -166,168 +166,168 @@ These flags are available for every command. They control the backends
and may be set in the config file. and may be set in the config file.
``` ```
--acd-auth-url string Auth server URL. --acd-auth-url string Auth server URL
--acd-client-id string OAuth Client Id --acd-client-id string OAuth Client Id
--acd-client-secret string OAuth Client Secret --acd-client-secret string OAuth Client Secret
--acd-encoding MultiEncoder This sets the encoding for the backend. (default Slash,InvalidUtf8,Dot) --acd-encoding MultiEncoder This sets the encoding for the backend (default Slash,InvalidUtf8,Dot)
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9Gi) --acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink (default 9Gi)
--acd-token string OAuth Access Token as a JSON blob. --acd-token string OAuth Access Token as a JSON blob
--acd-token-url string Token server url. --acd-token-url string Token server url
--acd-upload-wait-per-gb Duration Additional time per GiB to wait after a failed complete upload to see if it appears. (default 3m0s) --acd-upload-wait-per-gb Duration Additional time per GiB to wait after a failed complete upload to see if it appears (default 3m0s)
--alias-remote string Remote or path to alias. --alias-remote string Remote or path to alias
--azureblob-access-tier string Access tier of blob: hot, cool or archive. --azureblob-access-tier string Access tier of blob: hot, cool or archive
--azureblob-account string Storage Account Name (leave blank to use SAS URL or Emulator) --azureblob-account string Storage Account Name (leave blank to use SAS URL or Emulator)
--azureblob-archive-tier-delete Delete archive tier blobs before overwriting. --azureblob-archive-tier-delete Delete archive tier blobs before overwriting
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100 MiB). (default 4Mi) --azureblob-chunk-size SizeSuffix Upload chunk size (<= 100 MiB) (default 4Mi)
--azureblob-disable-checksum Don't store MD5 checksum with object metadata. --azureblob-disable-checksum Don't store MD5 checksum with object metadata
--azureblob-encoding MultiEncoder This sets the encoding for the backend. (default Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8) --azureblob-encoding MultiEncoder This sets the encoding for the backend (default Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8)
--azureblob-endpoint string Endpoint for the service --azureblob-endpoint string Endpoint for the service
--azureblob-key string Storage Account Key (leave blank to use SAS URL or Emulator) --azureblob-key string Storage Account Key (leave blank to use SAS URL or Emulator)
--azureblob-list-chunk int Size of blob list. (default 5000) --azureblob-list-chunk int Size of blob list (default 5000)
--azureblob-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed. (default 1m0s) --azureblob-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s)
--azureblob-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. --azureblob-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool
--azureblob-msi-client-id string Object ID of the user-assigned MSI to use, if any. Leave blank if msi_object_id or msi_mi_res_id specified. --azureblob-msi-client-id string Object ID of the user-assigned MSI to use, if any
--azureblob-msi-mi-res-id string Azure resource ID of the user-assigned MSI to use, if any. Leave blank if msi_client_id or msi_object_id specified. --azureblob-msi-mi-res-id string Azure resource ID of the user-assigned MSI to use, if any
--azureblob-msi-object-id string Object ID of the user-assigned MSI to use, if any. Leave blank if msi_client_id or msi_mi_res_id specified. --azureblob-msi-object-id string Object ID of the user-assigned MSI to use, if any
--azureblob-public-access string Public access level of a container: blob, container. --azureblob-public-access string Public access level of a container: blob, container
--azureblob-sas-url string SAS URL for container level access only --azureblob-sas-url string SAS URL for container level access only
--azureblob-service-principal-file string Path to file containing credentials for use with a service principal. --azureblob-service-principal-file string Path to file containing credentials for use with a service principal
--azureblob-upload-cutoff string Cutoff for switching to chunked upload (<= 256 MiB). (Deprecated) --azureblob-upload-cutoff string Cutoff for switching to chunked upload (<= 256 MiB) (deprecated)
--azureblob-use-emulator Uses local storage emulator if provided as 'true' (leave blank if using real azure storage endpoint) --azureblob-use-emulator Uses local storage emulator if provided as 'true' (leave blank if using real azure storage endpoint)
--azureblob-use-msi Use a managed service identity to authenticate (only works in Azure) --azureblob-use-msi Use a managed service identity to authenticate (only works in Azure)
--b2-account string Account ID or Application Key ID --b2-account string Account ID or Application Key ID
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96Mi) --b2-chunk-size SizeSuffix Upload chunk size (default 96Mi)
--b2-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4Gi) --b2-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4Gi)
--b2-disable-checksum Disable checksums for large (> upload cutoff) files --b2-disable-checksum Disable checksums for large (> upload cutoff) files
--b2-download-auth-duration Duration Time before the authorization token will expire in s or suffix ms|s|m|h|d. (default 1w) --b2-download-auth-duration Duration Time before the authorization token will expire in s or suffix ms|s|m|h|d (default 1w)
--b2-download-url string Custom endpoint for downloads. --b2-download-url string Custom endpoint for downloads
--b2-encoding MultiEncoder This sets the encoding for the backend. (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot) --b2-encoding MultiEncoder This sets the encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--b2-endpoint string Endpoint for the service. --b2-endpoint string Endpoint for the service
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files. --b2-hard-delete Permanently delete files on remote removal, otherwise hide files
--b2-key string Application Key --b2-key string Application Key
--b2-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed. (default 1m0s) --b2-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s)
--b2-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. --b2-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging. --b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200Mi) --b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
--b2-versions Include old versions in directory listings. --b2-versions Include old versions in directory listings
--box-access-token string Box App Primary Access Token --box-access-token string Box App Primary Access Token
--box-auth-url string Auth server URL. --box-auth-url string Auth server URL
--box-box-config-file string Box App config.json location --box-box-config-file string Box App config.json location
    --box-box-sub-type string                    (default "user")     --box-box-sub-type string                    (default "user")
--box-client-id string OAuth Client Id --box-client-id string OAuth Client Id
--box-client-secret string OAuth Client Secret --box-client-secret string OAuth Client Secret
--box-commit-retries int Max number of times to try committing a multipart file. (default 100) --box-commit-retries int Max number of times to try committing a multipart file (default 100)
--box-encoding MultiEncoder This sets the encoding for the backend. (default Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot) --box-encoding MultiEncoder This sets the encoding for the backend (default Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot)
--box-root-folder-id string Fill in for rclone to use a non root folder as its starting point. --box-root-folder-id string Fill in for rclone to use a non root folder as its starting point
--box-token string OAuth Access Token as a JSON blob. --box-token string OAuth Access Token as a JSON blob
--box-token-url string Token server url. --box-token-url string Token server url
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50 MiB). (default 50Mi) --box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50 MiB) (default 50Mi)
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s) --cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage (default 1m0s)
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming. --cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend") --cache-chunk-path string Directory to cache chunk files (default "$HOME/.cache/rclone/cache-backend")
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5Mi) --cache-chunk-size SizeSuffix The size of a chunk (partial file data) (default 5Mi)
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10Gi) --cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk (default 10Gi)
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend") --cache-db-path string Directory to store file structure metadata DB (default "$HOME/.cache/rclone/cache-backend")
--cache-db-purge Clear all the cached data for this remote on start. --cache-db-purge Clear all the cached data for this remote on start
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s) --cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times, etc.). (default 6h0m0s) --cache-info-age Duration How long to cache file structure information (directory listings, file size, times, etc) (default 6h0m0s)
--cache-plex-insecure string Skip all certificate verification when connecting to the Plex server --cache-plex-insecure string Skip all certificate verification when connecting to the Plex server
--cache-plex-password string The password of the Plex user (obscured) --cache-plex-password string The password of the Plex user (obscured)
--cache-plex-url string The URL of the Plex server --cache-plex-url string The URL of the Plex server
--cache-plex-username string The username of the Plex user --cache-plex-username string The username of the Plex user
--cache-read-retries int How many times to retry a read from a cache storage. (default 10) --cache-read-retries int How many times to retry a read from a cache storage (default 10)
--cache-remote string Remote to cache. --cache-remote string Remote to cache
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1) --cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded. --cache-tmp-upload-path string Directory to keep temporary files until they are uploaded
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s) --cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
--cache-workers int How many workers should run in parallel to download chunks. (default 4) --cache-workers int How many workers should run in parallel to download chunks (default 4)
--cache-writes Cache file data on writes through the FS --cache-writes Cache file data on writes through the FS
--chunker-chunk-size SizeSuffix Files larger than chunk size will be split in chunks. (default 2Gi) --chunker-chunk-size SizeSuffix Files larger than chunk size will be split in chunks (default 2Gi)
--chunker-fail-hard Choose how chunker should handle files with missing or invalid chunks. --chunker-fail-hard Choose how chunker should handle files with missing or invalid chunks
--chunker-hash-type string Choose how chunker handles hash sums. All modes but "none" require metadata. (default "md5") --chunker-hash-type string Choose how chunker handles hash sums (default "md5")
--chunker-remote string Remote to chunk/unchunk. --chunker-remote string Remote to chunk/unchunk
--compress-level int GZIP compression level (-2 to 9). (default -1) --compress-level int GZIP compression level (-2 to 9) (default -1)
--compress-mode string Compression mode. (default "gzip") --compress-mode string Compression mode (default "gzip")
--compress-ram-cache-limit SizeSuffix Some remotes don't allow the upload of files with unknown size. (default 20Mi) --compress-ram-cache-limit SizeSuffix Some remotes don't allow the upload of files with unknown size (default 20Mi)
--compress-remote string Remote to compress. --compress-remote string Remote to compress
-L, --copy-links Follow symlinks and copy the pointed to item. -L, --copy-links Follow symlinks and copy the pointed to item
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true) --crypt-directory-name-encryption Option to either encrypt directory names or leave them intact (default true)
--crypt-filename-encryption string How to encrypt the filenames. (default "standard") --crypt-filename-encryption string How to encrypt the filenames (default "standard")
--crypt-no-data-encryption Option to either encrypt file data or leave it unencrypted. --crypt-no-data-encryption Option to either encrypt file data or leave it unencrypted
--crypt-password string Password or pass phrase for encryption. (obscured) --crypt-password string Password or pass phrase for encryption (obscured)
--crypt-password2 string Password or pass phrase for salt. Optional but recommended. (obscured) --crypt-password2 string Password or pass phrase for salt (obscured)
--crypt-remote string Remote to encrypt/decrypt. --crypt-remote string Remote to encrypt/decrypt
--crypt-server-side-across-configs Allow server-side operations (e.g. copy) to work across different crypt configs. --crypt-server-side-across-configs Allow server-side operations (e.g. copy) to work across different crypt configs
--crypt-show-mapping For all files listed show how the names encrypt. --crypt-show-mapping For all files listed show how the names encrypt
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded. --drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time. --drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx)
--drive-auth-owner-only Only consider files owned by the authenticated user. --drive-auth-owner-only Only consider files owned by the authenticated user
--drive-auth-url string Auth server URL. --drive-auth-url string Auth server URL
--drive-chunk-size SizeSuffix Upload chunk size. Must a power of 2 >= 256k. (default 8Mi) --drive-chunk-size SizeSuffix Upload chunk size (default 8Mi)
--drive-client-id string Google Application Client Id --drive-client-id string Google Application Client Id
--drive-client-secret string OAuth Client Secret --drive-client-secret string OAuth Client Secret
--drive-disable-http2 Disable drive using http2 (default true) --drive-disable-http2 Disable drive using http2 (default true)
--drive-encoding MultiEncoder This sets the encoding for the backend. (default InvalidUtf8) --drive-encoding MultiEncoder This sets the encoding for the backend (default InvalidUtf8)
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg") --drive-export-formats string Comma separated list of preferred formats for downloading Google docs (default "docx,xlsx,pptx,svg")
--drive-formats string Deprecated: see export_formats --drive-formats string Deprecated: see export_formats
--drive-impersonate string Impersonate this user when using a service account. --drive-impersonate string Impersonate this user when using a service account
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs. --drive-import-formats string Comma separated list of preferred formats for uploading Google docs
--drive-keep-revision-forever Keep new head revision of each file forever. --drive-keep-revision-forever Keep new head revision of each file forever
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000) --drive-list-chunk int Size of listing chunk 100-1000, 0 to disable (default 1000)
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100) --drive-pacer-burst int Number of API calls to allow without sleeping (default 100)
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms) --drive-pacer-min-sleep Duration Minimum time to sleep between API calls (default 100ms)
--drive-root-folder-id string ID of the root folder --drive-root-folder-id string ID of the root folder
--drive-scope string Scope that rclone should use when requesting access from drive. --drive-scope string Scope that rclone should use when requesting access from drive
--drive-server-side-across-configs Allow server-side operations (e.g. copy) to work across different drive configs. --drive-server-side-across-configs Allow server-side operations (e.g. copy) to work across different drive configs
--drive-service-account-credentials string Service Account Credentials JSON blob --drive-service-account-credentials string Service Account Credentials JSON blob
--drive-service-account-file string Service Account Credentials JSON file path --drive-service-account-file string Service Account Credentials JSON file path
--drive-shared-with-me Only show files that are shared with me. --drive-shared-with-me Only show files that are shared with me
--drive-size-as-quota Show sizes as storage quota usage, not actual size. --drive-size-as-quota Show sizes as storage quota usage, not actual size
--drive-skip-checksum-gphotos Skip MD5 checksum on Google photos and videos only. --drive-skip-checksum-gphotos Skip MD5 checksum on Google photos and videos only
--drive-skip-gdocs Skip google documents in all listings. --drive-skip-gdocs Skip google documents in all listings
--drive-skip-shortcuts If set skip shortcut files --drive-skip-shortcuts If set skip shortcut files
--drive-starred-only Only show files that are starred. --drive-starred-only Only show files that are starred
--drive-stop-on-download-limit Make download limit errors be fatal --drive-stop-on-download-limit Make download limit errors be fatal
--drive-stop-on-upload-limit Make upload limit errors be fatal --drive-stop-on-upload-limit Make upload limit errors be fatal
--drive-team-drive string ID of the Shared Drive (Team Drive) --drive-team-drive string ID of the Shared Drive (Team Drive)
--drive-token string OAuth Access Token as a JSON blob. --drive-token string OAuth Access Token as a JSON blob
--drive-token-url string Token server url. --drive-token-url string Token server url
--drive-trashed-only Only show files that are in the trash. --drive-trashed-only Only show files that are in the trash
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8Mi) --drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8Mi)
--drive-use-created-date Use file created date instead of modified date., --drive-use-created-date Use file created date instead of modified date
--drive-use-shared-date Use date file was shared instead of modified date. --drive-use-shared-date Use date file was shared instead of modified date
--drive-use-trash Send files to the trash instead of deleting permanently. (default true) --drive-use-trash Send files to the trash instead of deleting permanently (default true)
--drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download. (default off) --drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download (default off)
--dropbox-auth-url string Auth server URL. --dropbox-auth-url string Auth server URL
--dropbox-batch-mode string Upload file batching sync|async|off. (default "sync") --dropbox-batch-mode string Upload file batching sync|async|off (default "sync")
--dropbox-batch-size int Max number of files in upload batch. --dropbox-batch-size int Max number of files in upload batch
--dropbox-batch-timeout Duration Max time to allow an idle upload batch before uploading (default 0s) --dropbox-batch-timeout Duration Max time to allow an idle upload batch before uploading (default 0s)
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150Mi). (default 48Mi) --dropbox-chunk-size SizeSuffix Upload chunk size (< 150Mi) (default 48Mi)
--dropbox-client-id string OAuth Client Id --dropbox-client-id string OAuth Client Id
--dropbox-client-secret string OAuth Client Secret --dropbox-client-secret string OAuth Client Secret
--dropbox-encoding MultiEncoder This sets the encoding for the backend. (default Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot) --dropbox-encoding MultiEncoder This sets the encoding for the backend (default Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot)
--dropbox-impersonate string Impersonate this user when using a business account. --dropbox-impersonate string Impersonate this user when using a business account
--dropbox-shared-files Instructs rclone to work on individual shared files. --dropbox-shared-files Instructs rclone to work on individual shared files
--dropbox-shared-folders Instructs rclone to work on shared folders. --dropbox-shared-folders Instructs rclone to work on shared folders
--dropbox-token string OAuth Access Token as a JSON blob. --dropbox-token string OAuth Access Token as a JSON blob
--dropbox-token-url string Token server url. --dropbox-token-url string Token server url
--fichier-api-key string Your API Key, get it from https://1fichier.com/console/params.pl --fichier-api-key string Your API Key, get it from https://1fichier.com/console/params.pl
--fichier-encoding MultiEncoder This sets the encoding for the backend. (default Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot) --fichier-encoding MultiEncoder This sets the encoding for the backend (default Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot)
--fichier-file-password string If you want to download a shared file that is password protected, add this parameter (obscured) --fichier-file-password string If you want to download a shared file that is password protected, add this parameter (obscured)
--fichier-folder-password string If you want to list the files in a shared folder that is password protected, add this parameter (obscured) --fichier-folder-password string If you want to list the files in a shared folder that is password protected, add this parameter (obscured)
--fichier-shared-folder string If you want to download a shared folder, add this parameter --fichier-shared-folder string If you want to download a shared folder, add this parameter
--filefabric-encoding MultiEncoder This sets the encoding for the backend. (default Slash,Del,Ctl,InvalidUtf8,Dot) --filefabric-encoding MultiEncoder This sets the encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot)
--filefabric-permanent-token string Permanent Authentication Token --filefabric-permanent-token string Permanent Authentication Token
--filefabric-root-folder-id string ID of the root folder --filefabric-root-folder-id string ID of the root folder
--filefabric-token string Session Token --filefabric-token string Session Token
--filefabric-token-expiry string Token expiry time --filefabric-token-expiry string Token expiry time
--filefabric-url string URL of the Enterprise File Fabric to connect to --filefabric-url string URL of the Enterprise File Fabric to connect to
--filefabric-version string Version read from the file fabric --filefabric-version string Version read from the file fabric
--ftp-close-timeout Duration Maximum time to wait for a response to close. (default 1m0s) --ftp-close-timeout Duration Maximum time to wait for a response to close (default 1m0s)
--ftp-concurrency int Maximum number of FTP simultaneous connections, 0 for unlimited --ftp-concurrency int Maximum number of FTP simultaneous connections, 0 for unlimited
--ftp-disable-epsv Disable using EPSV even if server advertises support --ftp-disable-epsv Disable using EPSV even if server advertises support
--ftp-disable-mlsd Disable using MLSD even if server advertises support --ftp-disable-mlsd Disable using MLSD even if server advertises support
--ftp-encoding MultiEncoder This sets the encoding for the backend. (default Slash,Del,Ctl,RightSpace,Dot) --ftp-encoding MultiEncoder This sets the encoding for the backend (default Slash,Del,Ctl,RightSpace,Dot)
--ftp-explicit-tls Use Explicit FTPS (FTP over TLS) --ftp-explicit-tls Use Explicit FTPS (FTP over TLS)
--ftp-host string FTP host to connect to --ftp-host string FTP host to connect to
--ftp-idle-timeout Duration Max time before closing idle connections (default 1m0s) --ftp-idle-timeout Duration Max time before closing idle connections (default 1m0s)
@ -337,34 +337,34 @@ and may be set in the config file.
--ftp-tls Use Implicit FTPS (FTP over TLS) --ftp-tls Use Implicit FTPS (FTP over TLS)
--ftp-user string FTP username, leave blank for current username, $USER --ftp-user string FTP username, leave blank for current username, $USER
--gcs-anonymous Access public buckets and objects without credentials --gcs-anonymous Access public buckets and objects without credentials
--gcs-auth-url string Auth server URL. --gcs-auth-url string Auth server URL
--gcs-bucket-acl string Access Control List for new buckets. --gcs-bucket-acl string Access Control List for new buckets
--gcs-bucket-policy-only Access checks should use bucket-level IAM policies. --gcs-bucket-policy-only Access checks should use bucket-level IAM policies
--gcs-client-id string OAuth Client Id --gcs-client-id string OAuth Client Id
--gcs-client-secret string OAuth Client Secret --gcs-client-secret string OAuth Client Secret
--gcs-encoding MultiEncoder This sets the encoding for the backend. (default Slash,CrLf,InvalidUtf8,Dot) --gcs-encoding MultiEncoder This sets the encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot)
--gcs-location string Location for the newly created buckets. --gcs-location string Location for the newly created buckets
--gcs-object-acl string Access Control List for new objects. --gcs-object-acl string Access Control List for new objects
--gcs-project-number string Project number. --gcs-project-number string Project number
--gcs-service-account-file string Service Account Credentials JSON file path --gcs-service-account-file string Service Account Credentials JSON file path
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage. --gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage
--gcs-token string OAuth Access Token as a JSON blob. --gcs-token string OAuth Access Token as a JSON blob
--gcs-token-url string Token server url. --gcs-token-url string Token server url
--gphotos-auth-url string Auth server URL. --gphotos-auth-url string Auth server URL
--gphotos-client-id string OAuth Client Id --gphotos-client-id string OAuth Client Id
--gphotos-client-secret string OAuth Client Secret --gphotos-client-secret string OAuth Client Secret
--gphotos-include-archived Also view and download archived media. --gphotos-include-archived Also view and download archived media
--gphotos-read-only Set to make the Google Photos backend read only. --gphotos-read-only Set to make the Google Photos backend read only
--gphotos-read-size Set to read the size of media items. --gphotos-read-size Set to read the size of media items
--gphotos-start-year int Year limits the photos to be downloaded to those which are uploaded after the given year (default 2000) --gphotos-start-year int Year limits the photos to be downloaded to those which are uploaded after the given year (default 2000)
    --gphotos-token string                         OAuth Access Token as a JSON blob.     --gphotos-token string                         OAuth Access Token as a JSON blob
--gphotos-token-url string Token server url. --gphotos-token-url string Token server url
--hasher-auto-size SizeSuffix Auto-update checksum for files smaller than this size (disabled by default). --hasher-auto-size SizeSuffix Auto-update checksum for files smaller than this size (disabled by default)
--hasher-hashes CommaSepList Comma separated list of supported checksum types. (default md5,sha1) --hasher-hashes CommaSepList Comma separated list of supported checksum types (default md5,sha1)
--hasher-max-age Duration Maximum time to keep checksums in cache (0 = no cache, off = cache forever). (default off) --hasher-max-age Duration Maximum time to keep checksums in cache (0 = no cache, off = cache forever) (default off)
--hasher-remote string Remote to cache checksums for (e.g. myRemote:path). --hasher-remote string Remote to cache checksums for (e.g. myRemote:path)
--hdfs-data-transfer-protection string Kerberos data transfer protection: authentication|integrity|privacy --hdfs-data-transfer-protection string Kerberos data transfer protection: authentication|integrity|privacy
--hdfs-encoding MultiEncoder This sets the encoding for the backend. (default Slash,Colon,Del,Ctl,InvalidUtf8,Dot) --hdfs-encoding MultiEncoder This sets the encoding for the backend (default Slash,Colon,Del,Ctl,InvalidUtf8,Dot)
--hdfs-namenode string hadoop name node and port --hdfs-namenode string hadoop name node and port
--hdfs-service-principal-name string Kerberos service principal name for the namenode --hdfs-service-principal-name string Kerberos service principal name for the namenode
--hdfs-username string hadoop user name --hdfs-username string hadoop user name
@ -372,173 +372,173 @@ and may be set in the config file.
--http-no-head Don't use HEAD requests to find file sizes in dir listing --http-no-head Don't use HEAD requests to find file sizes in dir listing
--http-no-slash Set this if the site doesn't end directories with / --http-no-slash Set this if the site doesn't end directories with /
--http-url string URL of http host to connect to --http-url string URL of http host to connect to
--hubic-auth-url string Auth server URL. --hubic-auth-url string Auth server URL
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5Gi) --hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container (default 5Gi)
--hubic-client-id string OAuth Client Id --hubic-client-id string OAuth Client Id
--hubic-client-secret string OAuth Client Secret --hubic-client-secret string OAuth Client Secret
--hubic-encoding MultiEncoder This sets the encoding for the backend. (default Slash,InvalidUtf8) --hubic-encoding MultiEncoder This sets the encoding for the backend (default Slash,InvalidUtf8)
--hubic-no-chunk Don't chunk files during streaming upload. --hubic-no-chunk Don't chunk files during streaming upload
--hubic-token string OAuth Access Token as a JSON blob. --hubic-token string OAuth Access Token as a JSON blob
--hubic-token-url string Token server url. --hubic-token-url string Token server url
--jottacloud-encoding MultiEncoder This sets the encoding for the backend. (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot) --jottacloud-encoding MultiEncoder This sets the encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot)
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash. --jottacloud-hard-delete Delete files permanently rather than putting them into the trash
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10Mi) --jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required (default 10Mi)
--jottacloud-no-versions Avoid server side versioning by deleting files and recreating files instead of overwriting them. --jottacloud-no-versions Avoid server side versioning by deleting files and recreating files instead of overwriting them
--jottacloud-trashed-only Only show files that are in the trash. --jottacloud-trashed-only Only show files that are in the trash
      --jottacloud-upload-resume-limit SizeSuffix      Files bigger than this can be resumed if the upload fail's. (default 10Mi)      --jottacloud-upload-resume-limit SizeSuffix      Files bigger than this can be resumed if the upload fails (default 10Mi)
--koofr-encoding MultiEncoder This sets the encoding for the backend. (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot) --koofr-encoding MultiEncoder This sets the encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--koofr-endpoint string The Koofr API endpoint to use (default "https://app.koofr.net") --koofr-endpoint string The Koofr API endpoint to use (default "https://app.koofr.net")
--koofr-mountid string Mount ID of the mount to use. If omitted, the primary mount is used. --koofr-mountid string Mount ID of the mount to use
--koofr-password string Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password) (obscured) --koofr-password string Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password) (obscured)
--koofr-setmtime Does the backend support setting modification time. Set this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend. (default true) --koofr-setmtime Does the backend support setting modification time (default true)
--koofr-user string Your Koofr user name --koofr-user string Your Koofr user name
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension -l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
--local-case-insensitive Force the filesystem to report itself as case insensitive --local-case-insensitive Force the filesystem to report itself as case insensitive
--local-case-sensitive Force the filesystem to report itself as case sensitive. --local-case-sensitive Force the filesystem to report itself as case sensitive
--local-encoding MultiEncoder This sets the encoding for the backend. (default Slash,Dot) --local-encoding MultiEncoder This sets the encoding for the backend (default Slash,Dot)
--local-no-check-updated Don't check to see if the files change during upload --local-no-check-updated Don't check to see if the files change during upload
--local-no-preallocate Disable preallocation of disk space for transferred files --local-no-preallocate Disable preallocation of disk space for transferred files
--local-no-set-modtime Disable setting modtime --local-no-set-modtime Disable setting modtime
--local-no-sparse Disable sparse files for multi-thread downloads --local-no-sparse Disable sparse files for multi-thread downloads
--local-nounc string Disable UNC (long path names) conversion on Windows --local-nounc string Disable UNC (long path names) conversion on Windows
--local-unicode-normalization Apply unicode NFC normalization to paths and filenames --local-unicode-normalization Apply unicode NFC normalization to paths and filenames
--local-zero-size-links Assume the Stat size of links is zero (and read them instead) (Deprecated) --local-zero-size-links Assume the Stat size of links is zero (and read them instead) (deprecated)
--mailru-check-hash What should copy do if file checksum is mismatched or invalid (default true) --mailru-check-hash What should copy do if file checksum is mismatched or invalid (default true)
--mailru-encoding MultiEncoder This sets the encoding for the backend. (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot) --mailru-encoding MultiEncoder This sets the encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--mailru-pass string Password (obscured) --mailru-pass string Password (obscured)
--mailru-speedup-enable Skip full upload if there is another file with same data hash. (default true) --mailru-speedup-enable Skip full upload if there is another file with same data hash (default true)
--mailru-speedup-file-patterns string Comma separated list of file name patterns eligible for speedup (put by hash). (default "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf") --mailru-speedup-file-patterns string Comma separated list of file name patterns eligible for speedup (put by hash) (default "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf")
--mailru-speedup-max-disk SizeSuffix This option allows you to disable speedup (put by hash) for large files (default 3Gi) --mailru-speedup-max-disk SizeSuffix This option allows you to disable speedup (put by hash) for large files (default 3Gi)
--mailru-speedup-max-memory SizeSuffix Files larger than the size given below will always be hashed on disk. (default 32Mi) --mailru-speedup-max-memory SizeSuffix Files larger than the size given below will always be hashed on disk (default 32Mi)
--mailru-user string User name (usually email) --mailru-user string User name (usually email)
--mega-debug Output more debug from Mega. --mega-debug Output more debug from Mega
--mega-encoding MultiEncoder This sets the encoding for the backend. (default Slash,InvalidUtf8,Dot) --mega-encoding MultiEncoder This sets the encoding for the backend (default Slash,InvalidUtf8,Dot)
--mega-hard-delete Delete files permanently rather than putting them into the trash. --mega-hard-delete Delete files permanently rather than putting them into the trash
--mega-pass string Password. (obscured) --mega-pass string Password (obscured)
--mega-user string User name --mega-user string User name
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only). -x, --one-file-system Don't cross filesystem boundaries (unix/macOS only)
--onedrive-auth-url string Auth server URL. --onedrive-auth-url string Auth server URL
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k (327,680 bytes). (default 10Mi) --onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k (327,680 bytes) (default 10Mi)
--onedrive-client-id string OAuth Client Id --onedrive-client-id string OAuth Client Id
--onedrive-client-secret string OAuth Client Secret --onedrive-client-secret string OAuth Client Secret
--onedrive-drive-id string The ID of the drive to use --onedrive-drive-id string The ID of the drive to use
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary ) --onedrive-drive-type string The type of the drive (personal | business | documentLibrary)
--onedrive-encoding MultiEncoder This sets the encoding for the backend. (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot) --onedrive-encoding MultiEncoder This sets the encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot)
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings. --onedrive-expose-onenote-files Set to make OneNote files show up in directory listings
--onedrive-link-password string Set the password for links created by the link command. --onedrive-link-password string Set the password for links created by the link command
--onedrive-link-scope string Set the scope of the links created by the link command. (default "anonymous") --onedrive-link-scope string Set the scope of the links created by the link command (default "anonymous")
--onedrive-link-type string Set the type of the links created by the link command. (default "view") --onedrive-link-type string Set the type of the links created by the link command (default "view")
--onedrive-list-chunk int Size of listing chunk. (default 1000) --onedrive-list-chunk int Size of listing chunk (default 1000)
--onedrive-no-versions Remove all versions on modifying operations --onedrive-no-versions Remove all versions on modifying operations
--onedrive-region string Choose national cloud region for OneDrive. (default "global") --onedrive-region string Choose national cloud region for OneDrive (default "global")
--onedrive-server-side-across-configs Allow server-side operations (e.g. copy) to work across different onedrive configs. --onedrive-server-side-across-configs Allow server-side operations (e.g. copy) to work across different onedrive configs
--onedrive-token string OAuth Access Token as a JSON blob. --onedrive-token string OAuth Access Token as a JSON blob
--onedrive-token-url string Token server url. --onedrive-token-url string Token server url
--opendrive-chunk-size SizeSuffix Files will be uploaded in chunks this size. (default 10Mi) --opendrive-chunk-size SizeSuffix Files will be uploaded in chunks this size (default 10Mi)
--opendrive-encoding MultiEncoder This sets the encoding for the backend. (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot) --opendrive-encoding MultiEncoder This sets the encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot)
--opendrive-password string Password. (obscured) --opendrive-password string Password (obscured)
--opendrive-username string Username --opendrive-username string Username
--pcloud-auth-url string Auth server URL. --pcloud-auth-url string Auth server URL
--pcloud-client-id string OAuth Client Id --pcloud-client-id string OAuth Client Id
--pcloud-client-secret string OAuth Client Secret --pcloud-client-secret string OAuth Client Secret
--pcloud-encoding MultiEncoder This sets the encoding for the backend. (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot) --pcloud-encoding MultiEncoder This sets the encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--pcloud-hostname string Hostname to connect to. (default "api.pcloud.com") --pcloud-hostname string Hostname to connect to (default "api.pcloud.com")
--pcloud-root-folder-id string Fill in for rclone to use a non root folder as its starting point. (default "d0") --pcloud-root-folder-id string Fill in for rclone to use a non root folder as its starting point (default "d0")
--pcloud-token string OAuth Access Token as a JSON blob. --pcloud-token string OAuth Access Token as a JSON blob
--pcloud-token-url string Token server url. --pcloud-token-url string Token server url
--premiumizeme-encoding MultiEncoder This sets the encoding for the backend. (default Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot) --premiumizeme-encoding MultiEncoder This sets the encoding for the backend (default Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--putio-encoding MultiEncoder This sets the encoding for the backend. (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot) --putio-encoding MultiEncoder This sets the encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--qingstor-access-key-id string QingStor Access Key ID --qingstor-access-key-id string QingStor Access Key ID
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4Mi) --qingstor-chunk-size SizeSuffix Chunk size to use for uploading (default 4Mi)
--qingstor-connection-retries int Number of connection retries. (default 3) --qingstor-connection-retries int Number of connection retries (default 3)
--qingstor-encoding MultiEncoder This sets the encoding for the backend. (default Slash,Ctl,InvalidUtf8) --qingstor-encoding MultiEncoder This sets the encoding for the backend (default Slash,Ctl,InvalidUtf8)
      --qingstor-endpoint string                       Enter an endpoint URL to connection QingStor API.      --qingstor-endpoint string                       Enter an endpoint URL to connect to QingStor API
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank. --qingstor-env-auth Get QingStor credentials from runtime
--qingstor-secret-access-key string QingStor Secret Access Key (password) --qingstor-secret-access-key string QingStor Secret Access Key (password)
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1) --qingstor-upload-concurrency int Concurrency for multipart uploads (default 1)
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi) --qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
--qingstor-zone string Zone to connect to. --qingstor-zone string Zone to connect to
--s3-access-key-id string AWS Access Key ID. --s3-access-key-id string AWS Access Key ID
--s3-acl string Canned ACL used when creating buckets and storing or copying objects. --s3-acl string Canned ACL used when creating buckets and storing or copying objects
--s3-bucket-acl string Canned ACL used when creating buckets. --s3-bucket-acl string Canned ACL used when creating buckets
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5Mi) --s3-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi)
--s3-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi) --s3-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi)
--s3-disable-checksum Don't store MD5 checksum with object metadata --s3-disable-checksum Don't store MD5 checksum with object metadata
--s3-disable-http2 Disable usage of http2 for S3 backends --s3-disable-http2 Disable usage of http2 for S3 backends
--s3-encoding MultiEncoder This sets the encoding for the backend. (default Slash,InvalidUtf8,Dot) --s3-encoding MultiEncoder This sets the encoding for the backend (default Slash,InvalidUtf8,Dot)
--s3-endpoint string Endpoint for S3 API. --s3-endpoint string Endpoint for S3 API
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). --s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars)
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true) --s3-force-path-style If true use path style access if false use virtual hosted style (default true)
--s3-leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. --s3-leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery
--s3-list-chunk int Size of listing chunk (response list for each ListObject S3 request). (default 1000) --s3-list-chunk int Size of listing chunk (response list for each ListObject S3 request) (default 1000)
--s3-location-constraint string Location constraint - must be set to match the Region. --s3-location-constraint string Location constraint - must be set to match the Region
--s3-max-upload-parts int Maximum number of parts in a multipart upload. (default 10000) --s3-max-upload-parts int Maximum number of parts in a multipart upload (default 10000)
--s3-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed. (default 1m0s) --s3-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s)
--s3-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool. --s3-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool
--s3-no-check-bucket If set, don't attempt to check the bucket exists or create it --s3-no-check-bucket If set, don't attempt to check the bucket exists or create it
--s3-no-head If set, don't HEAD uploaded objects to check integrity --s3-no-head If set, don't HEAD uploaded objects to check integrity
--s3-no-head-object If set, don't HEAD objects --s3-no-head-object If set, don't HEAD objects
--s3-profile string Profile to use in the shared credentials file --s3-profile string Profile to use in the shared credentials file
--s3-provider string Choose your S3 provider. --s3-provider string Choose your S3 provider
--s3-region string Region to connect to. --s3-region string Region to connect to
--s3-requester-pays Enables requester pays option when interacting with S3 bucket. --s3-requester-pays Enables requester pays option when interacting with S3 bucket
--s3-secret-access-key string AWS Secret Access Key (password) --s3-secret-access-key string AWS Secret Access Key (password)
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3. --s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3
--s3-session-token string An AWS session token --s3-session-token string An AWS session token
--s3-shared-credentials-file string Path to the shared credentials file --s3-shared-credentials-file string Path to the shared credentials file
--s3-sse-customer-algorithm string If using SSE-C, the server-side encryption algorithm used when storing this object in S3. --s3-sse-customer-algorithm string If using SSE-C, the server-side encryption algorithm used when storing this object in S3
--s3-sse-customer-key string If using SSE-C you must provide the secret encryption key used to encrypt/decrypt your data. --s3-sse-customer-key string If using SSE-C you must provide the secret encryption key used to encrypt/decrypt your data
--s3-sse-customer-key-md5 string If using SSE-C you may provide the secret encryption key MD5 checksum (optional). --s3-sse-customer-key-md5 string If using SSE-C you may provide the secret encryption key MD5 checksum (optional)
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key. --s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key
--s3-storage-class string The storage class to use when storing new objects in S3. --s3-storage-class string The storage class to use when storing new objects in S3
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4) --s3-upload-concurrency int Concurrency for multipart uploads (default 4)
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi) --s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
--s3-use-accelerate-endpoint If true use the AWS S3 accelerated endpoint. --s3-use-accelerate-endpoint If true use the AWS S3 accelerated endpoint
--s3-v2-auth If true use v2 authentication. --s3-v2-auth If true use v2 authentication
--seafile-2fa Two-factor authentication ('true' if the account has 2FA enabled) --seafile-2fa Two-factor authentication ('true' if the account has 2FA enabled)
--seafile-create-library Should rclone create a library if it doesn't exist --seafile-create-library Should rclone create a library if it doesn't exist
--seafile-encoding MultiEncoder This sets the encoding for the backend. (default Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8) --seafile-encoding MultiEncoder This sets the encoding for the backend (default Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8)
--seafile-library string Name of the library. Leave blank to access all non-encrypted libraries. --seafile-library string Name of the library
--seafile-library-key string Library password (for encrypted libraries only). Leave blank if you pass it through the command line. (obscured) --seafile-library-key string Library password (for encrypted libraries only) (obscured)
--seafile-pass string Password (obscured) --seafile-pass string Password (obscured)
--seafile-url string URL of seafile host to connect to --seafile-url string URL of seafile host to connect to
--seafile-user string User name (usually email address) --seafile-user string User name (usually email address)
--sftp-ask-password Allow asking for SFTP password when needed. --sftp-ask-password Allow asking for SFTP password when needed
--sftp-disable-concurrent-reads If set don't use concurrent reads --sftp-disable-concurrent-reads If set don't use concurrent reads
--sftp-disable-concurrent-writes If set don't use concurrent writes --sftp-disable-concurrent-writes If set don't use concurrent writes
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available. --sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available
--sftp-host string SSH host to connect to --sftp-host string SSH host to connect to
--sftp-idle-timeout Duration Max time before closing idle connections (default 1m0s) --sftp-idle-timeout Duration Max time before closing idle connections (default 1m0s)
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent. --sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file. (obscured) --sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file (obscured)
      --sftp-key-pem string                            Raw PEM-encoded private key, If specified, will override key_file parameter.      --sftp-key-pem string                            Raw PEM-encoded private key. If specified, will override key_file parameter
--sftp-key-use-agent When set forces the usage of the ssh-agent. --sftp-key-use-agent When set forces the usage of the ssh-agent
--sftp-known-hosts-file string Optional path to known_hosts file. --sftp-known-hosts-file string Optional path to known_hosts file
--sftp-md5sum-command string The command used to read md5 hashes. Leave blank for autodetect. --sftp-md5sum-command string The command used to read md5 hashes
--sftp-pass string SSH password, leave blank to use ssh-agent. (obscured) --sftp-pass string SSH password, leave blank to use ssh-agent (obscured)
--sftp-path-override string Override path used by SSH connection. --sftp-path-override string Override path used by SSH connection
--sftp-port string SSH port, leave blank to use default (22) --sftp-port string SSH port, leave blank to use default (22)
--sftp-pubkey-file string Optional path to public key file. --sftp-pubkey-file string Optional path to public key file
--sftp-server-command string Specifies the path or command to run a sftp server on the remote host. --sftp-server-command string Specifies the path or command to run a sftp server on the remote host
--sftp-set-modtime Set the modified time on the remote if set. (default true) --sftp-set-modtime Set the modified time on the remote if set (default true)
--sftp-sha1sum-command string The command used to read sha1 hashes. Leave blank for autodetect. --sftp-sha1sum-command string The command used to read sha1 hashes
--sftp-skip-links Set to skip any symlinks and any other non regular files. --sftp-skip-links Set to skip any symlinks and any other non regular files
--sftp-subsystem string Specifies the SSH2 subsystem on the remote host. (default "sftp") --sftp-subsystem string Specifies the SSH2 subsystem on the remote host (default "sftp")
--sftp-use-fstat If set use fstat instead of stat --sftp-use-fstat If set use fstat instead of stat
--sftp-use-insecure-cipher Enable the use of insecure ciphers and key exchange methods. --sftp-use-insecure-cipher Enable the use of insecure ciphers and key exchange methods
--sftp-user string SSH username, leave blank for current username, $USER --sftp-user string SSH username, leave blank for current username, $USER
--sharefile-chunk-size SizeSuffix Upload chunk size. Must a power of 2 >= 256k. (default 64Mi) --sharefile-chunk-size SizeSuffix Upload chunk size (default 64Mi)
--sharefile-encoding MultiEncoder This sets the encoding for the backend. (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,LeftPeriod,RightSpace,RightPeriod,InvalidUtf8,Dot) --sharefile-encoding MultiEncoder This sets the encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,LeftPeriod,RightSpace,RightPeriod,InvalidUtf8,Dot)
--sharefile-endpoint string Endpoint for API calls. --sharefile-endpoint string Endpoint for API calls
--sharefile-root-folder-id string ID of the root folder --sharefile-root-folder-id string ID of the root folder
--sharefile-upload-cutoff SizeSuffix Cutoff for switching to multipart upload. (default 128Mi) --sharefile-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (default 128Mi)
--skip-links Don't warn about skipped symlinks. --skip-links Don't warn about skipped symlinks
--sugarsync-access-key-id string Sugarsync Access Key ID. --sugarsync-access-key-id string Sugarsync Access Key ID
--sugarsync-app-id string Sugarsync App ID. --sugarsync-app-id string Sugarsync App ID
--sugarsync-authorization string Sugarsync authorization --sugarsync-authorization string Sugarsync authorization
--sugarsync-authorization-expiry string Sugarsync authorization expiry --sugarsync-authorization-expiry string Sugarsync authorization expiry
--sugarsync-deleted-id string Sugarsync deleted folder id --sugarsync-deleted-id string Sugarsync deleted folder id
--sugarsync-encoding MultiEncoder This sets the encoding for the backend. (default Slash,Ctl,InvalidUtf8,Dot) --sugarsync-encoding MultiEncoder This sets the encoding for the backend (default Slash,Ctl,InvalidUtf8,Dot)
--sugarsync-hard-delete Permanently delete files if true --sugarsync-hard-delete Permanently delete files if true
--sugarsync-private-access-key string Sugarsync Private Access Key --sugarsync-private-access-key string Sugarsync Private Access Key
--sugarsync-refresh-token string Sugarsync refresh token --sugarsync-refresh-token string Sugarsync refresh token
@ -547,56 +547,56 @@ and may be set in the config file.
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID) --swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME) --swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET) --swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
--swift-auth string Authentication URL for server (OS_AUTH_URL). --swift-auth string Authentication URL for server (OS_AUTH_URL)
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN) --swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION) --swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5Gi) --swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container (default 5Gi)
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME) --swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
--swift-encoding MultiEncoder This sets the encoding for the backend. (default Slash,InvalidUtf8) --swift-encoding MultiEncoder This sets the encoding for the backend (default Slash,InvalidUtf8)
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public") --swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form. --swift-env-auth Get swift credentials from environment variables in standard OpenStack form
--swift-key string API key or password (OS_PASSWORD). --swift-key string API key or password (OS_PASSWORD)
--swift-leave-parts-on-error If true avoid calling abort upload on a failure. It should be set to true for resuming uploads across different sessions. --swift-leave-parts-on-error If true avoid calling abort upload on a failure
--swift-no-chunk Don't chunk files during streaming upload. --swift-no-chunk Don't chunk files during streaming upload
--swift-region string Region name - optional (OS_REGION_NAME) --swift-region string Region name - optional (OS_REGION_NAME)
--swift-storage-policy string The storage policy to use when creating a new container --swift-storage-policy string The storage policy to use when creating a new container
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL) --swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME) --swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME) --swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID) --swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
--swift-user string User name to log in (OS_USERNAME). --swift-user string User name to log in (OS_USERNAME)
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID). --swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID)
--tardigrade-access-grant string Access Grant. --tardigrade-access-grant string Access grant
--tardigrade-api-key string API Key. --tardigrade-api-key string API key
--tardigrade-passphrase string Encryption Passphrase. To access existing objects enter passphrase used for uploading. --tardigrade-passphrase string Encryption passphrase
--tardigrade-provider string Choose an authentication method. (default "existing") --tardigrade-provider string Choose an authentication method (default "existing")
--tardigrade-satellite-address <nodeid>@<address>:<port> Satellite Address. Custom satellite address should match the format: <nodeid>@<address>:<port>. (default "us-central-1.tardigrade.io") --tardigrade-satellite-address <nodeid>@<address>:<port> Satellite address (default "us-central-1.tardigrade.io")
--union-action-policy string Policy to choose upstream on ACTION category. (default "epall") --union-action-policy string Policy to choose upstream on ACTION category (default "epall")
--union-cache-time int Cache time of usage and free space (in seconds). This option is only useful when a path preserving policy is used. (default 120) --union-cache-time int Cache time of usage and free space (in seconds) (default 120)
--union-create-policy string Policy to choose upstream on CREATE category. (default "epmfs") --union-create-policy string Policy to choose upstream on CREATE category (default "epmfs")
--union-search-policy string Policy to choose upstream on SEARCH category. (default "ff") --union-search-policy string Policy to choose upstream on SEARCH category (default "ff")
--union-upstreams string List of space separated upstreams. --union-upstreams string List of space separated upstreams
--uptobox-access-token string Your access Token, get it from https://uptobox.com/my_account --uptobox-access-token string Your access Token, get it from https://uptobox.com/my_account
--uptobox-encoding MultiEncoder This sets the encoding for the backend. (default Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot) --uptobox-encoding MultiEncoder This sets the encoding for the backend (default Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot)
--webdav-bearer-token string Bearer token instead of user/pass (e.g. a Macaroon) --webdav-bearer-token string Bearer token instead of user/pass (e.g. a Macaroon)
--webdav-bearer-token-command string Command to run to get a bearer token --webdav-bearer-token-command string Command to run to get a bearer token
--webdav-encoding string This sets the encoding for the backend. --webdav-encoding string This sets the encoding for the backend
--webdav-headers CommaSepList Set HTTP headers for all transactions --webdav-headers CommaSepList Set HTTP headers for all transactions
--webdav-pass string Password. (obscured) --webdav-pass string Password (obscured)
--webdav-url string URL of http host to connect to --webdav-url string URL of http host to connect to
--webdav-user string User name. In case NTLM authentication is used, the username should be in the format 'Domain\User'. --webdav-user string User name
--webdav-vendor string Name of the Webdav site/service/software you are using --webdav-vendor string Name of the Webdav site/service/software you are using
--yandex-auth-url string Auth server URL. --yandex-auth-url string Auth server URL
--yandex-client-id string OAuth Client Id --yandex-client-id string OAuth Client Id
--yandex-client-secret string OAuth Client Secret --yandex-client-secret string OAuth Client Secret
--yandex-encoding MultiEncoder This sets the encoding for the backend. (default Slash,Del,Ctl,InvalidUtf8,Dot) --yandex-encoding MultiEncoder This sets the encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot)
--yandex-token string OAuth Access Token as a JSON blob. --yandex-token string OAuth Access Token as a JSON blob
--yandex-token-url string Token server url. --yandex-token-url string Token server url
--zoho-auth-url string Auth server URL. --zoho-auth-url string Auth server URL
--zoho-client-id string OAuth Client Id --zoho-client-id string OAuth Client Id
--zoho-client-secret string OAuth Client Secret --zoho-client-secret string OAuth Client Secret
--zoho-encoding MultiEncoder This sets the encoding for the backend. (default Del,Ctl,InvalidUtf8) --zoho-encoding MultiEncoder This sets the encoding for the backend (default Del,Ctl,InvalidUtf8)
--zoho-region string Zoho region to connect to. --zoho-region string Zoho region to connect to
--zoho-token string OAuth Access Token as a JSON blob. --zoho-token string OAuth Access Token as a JSON blob
--zoho-token-url string Token server url. --zoho-token-url string Token server url
``` ```

View File

@ -235,7 +235,7 @@ func init() {
Fn: rcDeleteStats, Fn: rcDeleteStats,
Title: "Delete stats group.", Title: "Delete stats group.",
Help: ` Help: `
This deletes entire stats group This deletes entire stats group.
Parameters Parameters

View File

@ -51,7 +51,7 @@ const (
ConfigEncoding = "encoding" ConfigEncoding = "encoding"
// ConfigEncodingHelp is the help for ConfigEncoding // ConfigEncodingHelp is the help for ConfigEncoding
ConfigEncodingHelp = "This sets the encoding for the backend.\n\nSee: the [encoding section in the overview](/overview/#encoding) for more info." ConfigEncodingHelp = "This sets the encoding for the backend.\n\nSee the [encoding section in the overview](/overview/#encoding) for more info."
// ConfigAuthorize indicates that we just want "rclone authorize" // ConfigAuthorize indicates that we just want "rclone authorize"
ConfigAuthorize = "config_authorize" ConfigAuthorize = "config_authorize"

View File

@ -46,16 +46,16 @@ func AddFlags(ci *fs.ConfigInfo, flagSet *pflag.FlagSet) {
flags.CountVarP(flagSet, &verbose, "verbose", "v", "Print lots more stuff (repeat for more)") flags.CountVarP(flagSet, &verbose, "verbose", "v", "Print lots more stuff (repeat for more)")
flags.BoolVarP(flagSet, &quiet, "quiet", "q", false, "Print as little stuff as possible") flags.BoolVarP(flagSet, &quiet, "quiet", "q", false, "Print as little stuff as possible")
flags.DurationVarP(flagSet, &ci.ModifyWindow, "modify-window", "", ci.ModifyWindow, "Max time diff to be considered the same") flags.DurationVarP(flagSet, &ci.ModifyWindow, "modify-window", "", ci.ModifyWindow, "Max time diff to be considered the same")
flags.IntVarP(flagSet, &ci.Checkers, "checkers", "", ci.Checkers, "Number of checkers to run in parallel.") flags.IntVarP(flagSet, &ci.Checkers, "checkers", "", ci.Checkers, "Number of checkers to run in parallel")
flags.IntVarP(flagSet, &ci.Transfers, "transfers", "", ci.Transfers, "Number of file transfers to run in parallel.") flags.IntVarP(flagSet, &ci.Transfers, "transfers", "", ci.Transfers, "Number of file transfers to run in parallel")
flags.StringVarP(flagSet, &configPath, "config", "", config.GetConfigPath(), "Config file.") flags.StringVarP(flagSet, &configPath, "config", "", config.GetConfigPath(), "Config file")
flags.StringVarP(flagSet, &cacheDir, "cache-dir", "", config.GetCacheDir(), "Directory rclone will use for caching.") flags.StringVarP(flagSet, &cacheDir, "cache-dir", "", config.GetCacheDir(), "Directory rclone will use for caching")
flags.StringVarP(flagSet, &tempDir, "temp-dir", "", os.TempDir(), "Directory rclone will use for temporary files.") flags.StringVarP(flagSet, &tempDir, "temp-dir", "", os.TempDir(), "Directory rclone will use for temporary files")
flags.BoolVarP(flagSet, &ci.CheckSum, "checksum", "c", ci.CheckSum, "Skip based on checksum (if available) & size, not mod-time & size") flags.BoolVarP(flagSet, &ci.CheckSum, "checksum", "c", ci.CheckSum, "Skip based on checksum (if available) & size, not mod-time & size")
flags.BoolVarP(flagSet, &ci.SizeOnly, "size-only", "", ci.SizeOnly, "Skip based on size only, not mod-time or checksum") flags.BoolVarP(flagSet, &ci.SizeOnly, "size-only", "", ci.SizeOnly, "Skip based on size only, not mod-time or checksum")
flags.BoolVarP(flagSet, &ci.IgnoreTimes, "ignore-times", "I", ci.IgnoreTimes, "Don't skip files that match size and time - transfer all files") flags.BoolVarP(flagSet, &ci.IgnoreTimes, "ignore-times", "I", ci.IgnoreTimes, "Don't skip files that match size and time - transfer all files")
flags.BoolVarP(flagSet, &ci.IgnoreExisting, "ignore-existing", "", ci.IgnoreExisting, "Skip all files that exist on destination") flags.BoolVarP(flagSet, &ci.IgnoreExisting, "ignore-existing", "", ci.IgnoreExisting, "Skip all files that exist on destination")
flags.BoolVarP(flagSet, &ci.IgnoreErrors, "ignore-errors", "", ci.IgnoreErrors, "delete even if there are I/O errors") flags.BoolVarP(flagSet, &ci.IgnoreErrors, "ignore-errors", "", ci.IgnoreErrors, "Delete even if there are I/O errors")
flags.BoolVarP(flagSet, &ci.DryRun, "dry-run", "n", ci.DryRun, "Do a trial run with no permanent changes") flags.BoolVarP(flagSet, &ci.DryRun, "dry-run", "n", ci.DryRun, "Do a trial run with no permanent changes")
flags.BoolVarP(flagSet, &ci.Interactive, "interactive", "i", ci.Interactive, "Enable interactive mode") flags.BoolVarP(flagSet, &ci.Interactive, "interactive", "i", ci.Interactive, "Enable interactive mode")
flags.DurationVarP(flagSet, &ci.ConnectTimeout, "contimeout", "", ci.ConnectTimeout, "Connect timeout") flags.DurationVarP(flagSet, &ci.ConnectTimeout, "contimeout", "", ci.ConnectTimeout, "Connect timeout")
@ -63,79 +63,79 @@ func AddFlags(ci *fs.ConfigInfo, flagSet *pflag.FlagSet) {
flags.DurationVarP(flagSet, &ci.ExpectContinueTimeout, "expect-continue-timeout", "", ci.ExpectContinueTimeout, "Timeout when using expect / 100-continue in HTTP") flags.DurationVarP(flagSet, &ci.ExpectContinueTimeout, "expect-continue-timeout", "", ci.ExpectContinueTimeout, "Timeout when using expect / 100-continue in HTTP")
flags.BoolVarP(flagSet, &dumpHeaders, "dump-headers", "", false, "Dump HTTP headers - may contain sensitive info") flags.BoolVarP(flagSet, &dumpHeaders, "dump-headers", "", false, "Dump HTTP headers - may contain sensitive info")
flags.BoolVarP(flagSet, &dumpBodies, "dump-bodies", "", false, "Dump HTTP headers and bodies - may contain sensitive info") flags.BoolVarP(flagSet, &dumpBodies, "dump-bodies", "", false, "Dump HTTP headers and bodies - may contain sensitive info")
flags.BoolVarP(flagSet, &ci.InsecureSkipVerify, "no-check-certificate", "", ci.InsecureSkipVerify, "Do not verify the server SSL certificate. Insecure.") flags.BoolVarP(flagSet, &ci.InsecureSkipVerify, "no-check-certificate", "", ci.InsecureSkipVerify, "Do not verify the server SSL certificate (insecure)")
flags.BoolVarP(flagSet, &ci.AskPassword, "ask-password", "", ci.AskPassword, "Allow prompt for password for encrypted configuration.") flags.BoolVarP(flagSet, &ci.AskPassword, "ask-password", "", ci.AskPassword, "Allow prompt for password for encrypted configuration")
flags.FVarP(flagSet, &ci.PasswordCommand, "password-command", "", "Command for supplying password for encrypted configuration.") flags.FVarP(flagSet, &ci.PasswordCommand, "password-command", "", "Command for supplying password for encrypted configuration")
flags.BoolVarP(flagSet, &deleteBefore, "delete-before", "", false, "When synchronizing, delete files on destination before transferring") flags.BoolVarP(flagSet, &deleteBefore, "delete-before", "", false, "When synchronizing, delete files on destination before transferring")
flags.BoolVarP(flagSet, &deleteDuring, "delete-during", "", false, "When synchronizing, delete files during transfer") flags.BoolVarP(flagSet, &deleteDuring, "delete-during", "", false, "When synchronizing, delete files during transfer")
flags.BoolVarP(flagSet, &deleteAfter, "delete-after", "", false, "When synchronizing, delete files on destination after transferring (default)") flags.BoolVarP(flagSet, &deleteAfter, "delete-after", "", false, "When synchronizing, delete files on destination after transferring (default)")
flags.Int64VarP(flagSet, &ci.MaxDelete, "max-delete", "", -1, "When synchronizing, limit the number of deletes") flags.Int64VarP(flagSet, &ci.MaxDelete, "max-delete", "", -1, "When synchronizing, limit the number of deletes")
flags.BoolVarP(flagSet, &ci.TrackRenames, "track-renames", "", ci.TrackRenames, "When synchronizing, track file renames and do a server-side move if possible") flags.BoolVarP(flagSet, &ci.TrackRenames, "track-renames", "", ci.TrackRenames, "When synchronizing, track file renames and do a server-side move if possible")
flags.StringVarP(flagSet, &ci.TrackRenamesStrategy, "track-renames-strategy", "", ci.TrackRenamesStrategy, "Strategies to use when synchronizing using track-renames hash|modtime|leaf") flags.StringVarP(flagSet, &ci.TrackRenamesStrategy, "track-renames-strategy", "", ci.TrackRenamesStrategy, "Strategies to use when synchronizing using track-renames hash|modtime|leaf")
flags.IntVarP(flagSet, &ci.LowLevelRetries, "low-level-retries", "", ci.LowLevelRetries, "Number of low level retries to do.") flags.IntVarP(flagSet, &ci.LowLevelRetries, "low-level-retries", "", ci.LowLevelRetries, "Number of low level retries to do")
flags.BoolVarP(flagSet, &ci.UpdateOlder, "update", "u", ci.UpdateOlder, "Skip files that are newer on the destination.") flags.BoolVarP(flagSet, &ci.UpdateOlder, "update", "u", ci.UpdateOlder, "Skip files that are newer on the destination")
flags.BoolVarP(flagSet, &ci.UseServerModTime, "use-server-modtime", "", ci.UseServerModTime, "Use server modified time instead of object metadata") flags.BoolVarP(flagSet, &ci.UseServerModTime, "use-server-modtime", "", ci.UseServerModTime, "Use server modified time instead of object metadata")
flags.BoolVarP(flagSet, &ci.NoGzip, "no-gzip-encoding", "", ci.NoGzip, "Don't set Accept-Encoding: gzip.") flags.BoolVarP(flagSet, &ci.NoGzip, "no-gzip-encoding", "", ci.NoGzip, "Don't set Accept-Encoding: gzip")
flags.IntVarP(flagSet, &ci.MaxDepth, "max-depth", "", ci.MaxDepth, "If set limits the recursion depth to this.") flags.IntVarP(flagSet, &ci.MaxDepth, "max-depth", "", ci.MaxDepth, "If set limits the recursion depth to this")
flags.BoolVarP(flagSet, &ci.IgnoreSize, "ignore-size", "", false, "Ignore size when skipping use mod-time or checksum.") flags.BoolVarP(flagSet, &ci.IgnoreSize, "ignore-size", "", false, "Ignore size when skipping use mod-time or checksum")
flags.BoolVarP(flagSet, &ci.IgnoreChecksum, "ignore-checksum", "", ci.IgnoreChecksum, "Skip post copy check of checksums.") flags.BoolVarP(flagSet, &ci.IgnoreChecksum, "ignore-checksum", "", ci.IgnoreChecksum, "Skip post copy check of checksums")
flags.BoolVarP(flagSet, &ci.IgnoreCaseSync, "ignore-case-sync", "", ci.IgnoreCaseSync, "Ignore case when synchronizing") flags.BoolVarP(flagSet, &ci.IgnoreCaseSync, "ignore-case-sync", "", ci.IgnoreCaseSync, "Ignore case when synchronizing")
flags.BoolVarP(flagSet, &ci.NoTraverse, "no-traverse", "", ci.NoTraverse, "Don't traverse destination file system on copy.") flags.BoolVarP(flagSet, &ci.NoTraverse, "no-traverse", "", ci.NoTraverse, "Don't traverse destination file system on copy")
flags.BoolVarP(flagSet, &ci.CheckFirst, "check-first", "", ci.CheckFirst, "Do all the checks before starting transfers.") flags.BoolVarP(flagSet, &ci.CheckFirst, "check-first", "", ci.CheckFirst, "Do all the checks before starting transfers")
flags.BoolVarP(flagSet, &ci.NoCheckDest, "no-check-dest", "", ci.NoCheckDest, "Don't check the destination, copy regardless.") flags.BoolVarP(flagSet, &ci.NoCheckDest, "no-check-dest", "", ci.NoCheckDest, "Don't check the destination, copy regardless")
flags.BoolVarP(flagSet, &ci.NoUnicodeNormalization, "no-unicode-normalization", "", ci.NoUnicodeNormalization, "Don't normalize unicode characters in filenames.") flags.BoolVarP(flagSet, &ci.NoUnicodeNormalization, "no-unicode-normalization", "", ci.NoUnicodeNormalization, "Don't normalize unicode characters in filenames")
flags.BoolVarP(flagSet, &ci.NoUpdateModTime, "no-update-modtime", "", ci.NoUpdateModTime, "Don't update destination mod-time if files identical.") flags.BoolVarP(flagSet, &ci.NoUpdateModTime, "no-update-modtime", "", ci.NoUpdateModTime, "Don't update destination mod-time if files identical")
flags.StringArrayVarP(flagSet, &ci.CompareDest, "compare-dest", "", nil, "Include additional comma separated server-side paths during comparison.") flags.StringArrayVarP(flagSet, &ci.CompareDest, "compare-dest", "", nil, "Include additional comma separated server-side paths during comparison")
flags.StringArrayVarP(flagSet, &ci.CopyDest, "copy-dest", "", nil, "Implies --compare-dest but also copies files from paths into destination.") flags.StringArrayVarP(flagSet, &ci.CopyDest, "copy-dest", "", nil, "Implies --compare-dest but also copies files from paths into destination")
flags.StringVarP(flagSet, &ci.BackupDir, "backup-dir", "", ci.BackupDir, "Make backups into hierarchy based in DIR.") flags.StringVarP(flagSet, &ci.BackupDir, "backup-dir", "", ci.BackupDir, "Make backups into hierarchy based in DIR")
flags.StringVarP(flagSet, &ci.Suffix, "suffix", "", ci.Suffix, "Suffix to add to changed files.") flags.StringVarP(flagSet, &ci.Suffix, "suffix", "", ci.Suffix, "Suffix to add to changed files")
flags.BoolVarP(flagSet, &ci.SuffixKeepExtension, "suffix-keep-extension", "", ci.SuffixKeepExtension, "Preserve the extension when using --suffix.") flags.BoolVarP(flagSet, &ci.SuffixKeepExtension, "suffix-keep-extension", "", ci.SuffixKeepExtension, "Preserve the extension when using --suffix")
flags.BoolVarP(flagSet, &ci.UseListR, "fast-list", "", ci.UseListR, "Use recursive list if available. Uses more memory but fewer transactions.") flags.BoolVarP(flagSet, &ci.UseListR, "fast-list", "", ci.UseListR, "Use recursive list if available; uses more memory but fewer transactions")
flags.Float64VarP(flagSet, &ci.TPSLimit, "tpslimit", "", ci.TPSLimit, "Limit HTTP transactions per second to this.") flags.Float64VarP(flagSet, &ci.TPSLimit, "tpslimit", "", ci.TPSLimit, "Limit HTTP transactions per second to this")
flags.IntVarP(flagSet, &ci.TPSLimitBurst, "tpslimit-burst", "", ci.TPSLimitBurst, "Max burst of transactions for --tpslimit.") flags.IntVarP(flagSet, &ci.TPSLimitBurst, "tpslimit-burst", "", ci.TPSLimitBurst, "Max burst of transactions for --tpslimit")
flags.StringVarP(flagSet, &bindAddr, "bind", "", "", "Local address to bind to for outgoing connections, IPv4, IPv6 or name.") flags.StringVarP(flagSet, &bindAddr, "bind", "", "", "Local address to bind to for outgoing connections, IPv4, IPv6 or name")
flags.StringVarP(flagSet, &disableFeatures, "disable", "", "", "Disable a comma separated list of features. Use --disable help to see a list.") flags.StringVarP(flagSet, &disableFeatures, "disable", "", "", "Disable a comma separated list of features (use --disable help to see a list)")
flags.StringVarP(flagSet, &ci.UserAgent, "user-agent", "", ci.UserAgent, "Set the user-agent to a specified string. The default is rclone/ version") flags.StringVarP(flagSet, &ci.UserAgent, "user-agent", "", ci.UserAgent, "Set the user-agent to a specified string")
flags.BoolVarP(flagSet, &ci.Immutable, "immutable", "", ci.Immutable, "Do not modify files. Fail if existing files have been modified.") flags.BoolVarP(flagSet, &ci.Immutable, "immutable", "", ci.Immutable, "Do not modify files, fail if existing files have been modified")
flags.BoolVarP(flagSet, &ci.AutoConfirm, "auto-confirm", "", ci.AutoConfirm, "If enabled, do not request console confirmation.") flags.BoolVarP(flagSet, &ci.AutoConfirm, "auto-confirm", "", ci.AutoConfirm, "If enabled, do not request console confirmation")
flags.IntVarP(flagSet, &ci.StatsFileNameLength, "stats-file-name-length", "", ci.StatsFileNameLength, "Max file name length in stats. 0 for no limit") flags.IntVarP(flagSet, &ci.StatsFileNameLength, "stats-file-name-length", "", ci.StatsFileNameLength, "Max file name length in stats (0 for no limit)")
flags.FVarP(flagSet, &ci.LogLevel, "log-level", "", "Log level DEBUG|INFO|NOTICE|ERROR") flags.FVarP(flagSet, &ci.LogLevel, "log-level", "", "Log level DEBUG|INFO|NOTICE|ERROR")
flags.FVarP(flagSet, &ci.StatsLogLevel, "stats-log-level", "", "Log level to show --stats output DEBUG|INFO|NOTICE|ERROR") flags.FVarP(flagSet, &ci.StatsLogLevel, "stats-log-level", "", "Log level to show --stats output DEBUG|INFO|NOTICE|ERROR")
flags.FVarP(flagSet, &ci.BwLimit, "bwlimit", "", "Bandwidth limit in KiB/s, or use suffix B|K|M|G|T|P or a full timetable.") flags.FVarP(flagSet, &ci.BwLimit, "bwlimit", "", "Bandwidth limit in KiB/s, or use suffix B|K|M|G|T|P or a full timetable")
flags.FVarP(flagSet, &ci.BwLimitFile, "bwlimit-file", "", "Bandwidth limit per file in KiB/s, or use suffix B|K|M|G|T|P or a full timetable.") flags.FVarP(flagSet, &ci.BwLimitFile, "bwlimit-file", "", "Bandwidth limit per file in KiB/s, or use suffix B|K|M|G|T|P or a full timetable")
flags.FVarP(flagSet, &ci.BufferSize, "buffer-size", "", "In memory buffer size when reading files for each --transfer.") flags.FVarP(flagSet, &ci.BufferSize, "buffer-size", "", "In memory buffer size when reading files for each --transfer")
flags.FVarP(flagSet, &ci.StreamingUploadCutoff, "streaming-upload-cutoff", "", "Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends.") flags.FVarP(flagSet, &ci.StreamingUploadCutoff, "streaming-upload-cutoff", "", "Cutoff for switching to chunked upload if file size is unknown, upload starts after reaching cutoff or when file ends")
flags.FVarP(flagSet, &ci.Dump, "dump", "", "List of items to dump from: "+fs.DumpFlagsList) flags.FVarP(flagSet, &ci.Dump, "dump", "", "List of items to dump from: "+fs.DumpFlagsList)
flags.FVarP(flagSet, &ci.MaxTransfer, "max-transfer", "", "Maximum size of data to transfer.") flags.FVarP(flagSet, &ci.MaxTransfer, "max-transfer", "", "Maximum size of data to transfer")
flags.DurationVarP(flagSet, &ci.MaxDuration, "max-duration", "", 0, "Maximum duration rclone will transfer data for.") flags.DurationVarP(flagSet, &ci.MaxDuration, "max-duration", "", 0, "Maximum duration rclone will transfer data for")
flags.FVarP(flagSet, &ci.CutoffMode, "cutoff-mode", "", "Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS") flags.FVarP(flagSet, &ci.CutoffMode, "cutoff-mode", "", "Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS")
flags.IntVarP(flagSet, &ci.MaxBacklog, "max-backlog", "", ci.MaxBacklog, "Maximum number of objects in sync or check backlog.") flags.IntVarP(flagSet, &ci.MaxBacklog, "max-backlog", "", ci.MaxBacklog, "Maximum number of objects in sync or check backlog")
flags.IntVarP(flagSet, &ci.MaxStatsGroups, "max-stats-groups", "", ci.MaxStatsGroups, "Maximum number of stats groups to keep in memory. On max oldest is discarded.") flags.IntVarP(flagSet, &ci.MaxStatsGroups, "max-stats-groups", "", ci.MaxStatsGroups, "Maximum number of stats groups to keep in memory, on max oldest is discarded")
flags.BoolVarP(flagSet, &ci.StatsOneLine, "stats-one-line", "", ci.StatsOneLine, "Make the stats fit on one line.") flags.BoolVarP(flagSet, &ci.StatsOneLine, "stats-one-line", "", ci.StatsOneLine, "Make the stats fit on one line")
flags.BoolVarP(flagSet, &ci.StatsOneLineDate, "stats-one-line-date", "", ci.StatsOneLineDate, "Enables --stats-one-line and add current date/time prefix.") flags.BoolVarP(flagSet, &ci.StatsOneLineDate, "stats-one-line-date", "", ci.StatsOneLineDate, "Enable --stats-one-line and add current date/time prefix")
flags.StringVarP(flagSet, &ci.StatsOneLineDateFormat, "stats-one-line-date-format", "", ci.StatsOneLineDateFormat, "Enables --stats-one-line-date and uses custom formatted date. Enclose date string in double quotes (\"). See https://golang.org/pkg/time/#Time.Format") flags.StringVarP(flagSet, &ci.StatsOneLineDateFormat, "stats-one-line-date-format", "", ci.StatsOneLineDateFormat, "Enable --stats-one-line-date and use custom formatted date: Enclose date string in double quotes (\"), see https://golang.org/pkg/time/#Time.Format")
flags.BoolVarP(flagSet, &ci.ErrorOnNoTransfer, "error-on-no-transfer", "", ci.ErrorOnNoTransfer, "Sets exit code 9 if no files are transferred, useful in scripts") flags.BoolVarP(flagSet, &ci.ErrorOnNoTransfer, "error-on-no-transfer", "", ci.ErrorOnNoTransfer, "Sets exit code 9 if no files are transferred, useful in scripts")
flags.BoolVarP(flagSet, &ci.Progress, "progress", "P", ci.Progress, "Show progress during transfer.") flags.BoolVarP(flagSet, &ci.Progress, "progress", "P", ci.Progress, "Show progress during transfer")
flags.BoolVarP(flagSet, &ci.ProgressTerminalTitle, "progress-terminal-title", "", ci.ProgressTerminalTitle, "Show progress on the terminal title. Requires -P/--progress.") flags.BoolVarP(flagSet, &ci.ProgressTerminalTitle, "progress-terminal-title", "", ci.ProgressTerminalTitle, "Show progress on the terminal title (requires -P/--progress)")
flags.BoolVarP(flagSet, &ci.Cookie, "use-cookies", "", ci.Cookie, "Enable session cookiejar.") flags.BoolVarP(flagSet, &ci.Cookie, "use-cookies", "", ci.Cookie, "Enable session cookiejar")
flags.BoolVarP(flagSet, &ci.UseMmap, "use-mmap", "", ci.UseMmap, "Use mmap allocator (see docs).") flags.BoolVarP(flagSet, &ci.UseMmap, "use-mmap", "", ci.UseMmap, "Use mmap allocator (see docs)")
flags.StringVarP(flagSet, &ci.CaCert, "ca-cert", "", ci.CaCert, "CA certificate used to verify servers") flags.StringVarP(flagSet, &ci.CaCert, "ca-cert", "", ci.CaCert, "CA certificate used to verify servers")
flags.StringVarP(flagSet, &ci.ClientCert, "client-cert", "", ci.ClientCert, "Client SSL certificate (PEM) for mutual TLS auth") flags.StringVarP(flagSet, &ci.ClientCert, "client-cert", "", ci.ClientCert, "Client SSL certificate (PEM) for mutual TLS auth")
flags.StringVarP(flagSet, &ci.ClientKey, "client-key", "", ci.ClientKey, "Client SSL private key (PEM) for mutual TLS auth") flags.StringVarP(flagSet, &ci.ClientKey, "client-key", "", ci.ClientKey, "Client SSL private key (PEM) for mutual TLS auth")
flags.FVarP(flagSet, &ci.MultiThreadCutoff, "multi-thread-cutoff", "", "Use multi-thread downloads for files above this size.") flags.FVarP(flagSet, &ci.MultiThreadCutoff, "multi-thread-cutoff", "", "Use multi-thread downloads for files above this size")
flags.IntVarP(flagSet, &ci.MultiThreadStreams, "multi-thread-streams", "", ci.MultiThreadStreams, "Max number of streams to use for multi-thread downloads.") flags.IntVarP(flagSet, &ci.MultiThreadStreams, "multi-thread-streams", "", ci.MultiThreadStreams, "Max number of streams to use for multi-thread downloads")
flags.BoolVarP(flagSet, &ci.UseJSONLog, "use-json-log", "", ci.UseJSONLog, "Use json log format.") flags.BoolVarP(flagSet, &ci.UseJSONLog, "use-json-log", "", ci.UseJSONLog, "Use json log format")
flags.StringVarP(flagSet, &ci.OrderBy, "order-by", "", ci.OrderBy, "Instructions on how to order the transfers, e.g. 'size,descending'") flags.StringVarP(flagSet, &ci.OrderBy, "order-by", "", ci.OrderBy, "Instructions on how to order the transfers, e.g. 'size,descending'")
flags.StringArrayVarP(flagSet, &uploadHeaders, "header-upload", "", nil, "Set HTTP header for upload transactions") flags.StringArrayVarP(flagSet, &uploadHeaders, "header-upload", "", nil, "Set HTTP header for upload transactions")
flags.StringArrayVarP(flagSet, &downloadHeaders, "header-download", "", nil, "Set HTTP header for download transactions") flags.StringArrayVarP(flagSet, &downloadHeaders, "header-download", "", nil, "Set HTTP header for download transactions")
flags.StringArrayVarP(flagSet, &headers, "header", "", nil, "Set HTTP header for all transactions") flags.StringArrayVarP(flagSet, &headers, "header", "", nil, "Set HTTP header for all transactions")
flags.BoolVarP(flagSet, &ci.RefreshTimes, "refresh-times", "", ci.RefreshTimes, "Refresh the modtime of remote files.") flags.BoolVarP(flagSet, &ci.RefreshTimes, "refresh-times", "", ci.RefreshTimes, "Refresh the modtime of remote files")
flags.BoolVarP(flagSet, &ci.NoConsole, "no-console", "", ci.NoConsole, "Hide console window. Supported on Windows only.") flags.BoolVarP(flagSet, &ci.NoConsole, "no-console", "", ci.NoConsole, "Hide console window (supported on Windows only)")
flags.StringVarP(flagSet, &dscp, "dscp", "", "", "Set DSCP value to connections. Can be value or names, eg. CS1, LE, DF, AF21.") flags.StringVarP(flagSet, &dscp, "dscp", "", "", "Set DSCP value to connections, value or name, e.g. CS1, LE, DF, AF21")
flags.DurationVarP(flagSet, &ci.FsCacheExpireDuration, "fs-cache-expire-duration", "", ci.FsCacheExpireDuration, "cache remotes for this long (0 to disable caching)") flags.DurationVarP(flagSet, &ci.FsCacheExpireDuration, "fs-cache-expire-duration", "", ci.FsCacheExpireDuration, "Cache remotes for this long (0 to disable caching)")
flags.DurationVarP(flagSet, &ci.FsCacheExpireInterval, "fs-cache-expire-interval", "", ci.FsCacheExpireInterval, "interval to check for expired remotes") flags.DurationVarP(flagSet, &ci.FsCacheExpireInterval, "fs-cache-expire-interval", "", ci.FsCacheExpireInterval, "Interval to check for expired remotes")
flags.BoolVarP(flagSet, &ci.DisableHTTP2, "disable-http2", "", ci.DisableHTTP2, "Disable HTTP/2 in the global transport.") flags.BoolVarP(flagSet, &ci.DisableHTTP2, "disable-http2", "", ci.DisableHTTP2, "Disable HTTP/2 in the global transport")
flags.BoolVarP(flagSet, &ci.HumanReadable, "human-readable", "", ci.HumanReadable, "Print numbers in a human-readable format. Sizes with suffix Ki|Mi|Gi|Ti|Pi.") flags.BoolVarP(flagSet, &ci.HumanReadable, "human-readable", "", ci.HumanReadable, "Print numbers in a human-readable format, sizes with suffix Ki|Mi|Gi|Ti|Pi")
flags.DurationVarP(flagSet, &ci.KvLockTime, "kv-lock-time", "", ci.KvLockTime, "Maximum time to keep key-value database locked by process") flags.DurationVarP(flagSet, &ci.KvLockTime, "kv-lock-time", "", ci.KvLockTime, "Maximum time to keep key-value database locked by process")
} }

View File

@ -130,7 +130,7 @@ func init() {
return rcConfig(ctx, in, name) return rcConfig(ctx, in, name)
}, },
Title: name + " the config for a remote.", Title: name + " the config for a remote.",
Help: `This takes the following parameters Help: `This takes the following parameters:
- name - name of remote - name - name of remote
- parameters - a map of \{ "key": "value" \} pairs - parameters - a map of \{ "key": "value" \} pairs

View File

@ -16,5 +16,5 @@ func AddFlags(flagSet *pflag.FlagSet) {
flags.StringVarP(flagSet, &log.Opt.Format, "log-format", "", log.Opt.Format, "Comma separated list of log format options") flags.StringVarP(flagSet, &log.Opt.Format, "log-format", "", log.Opt.Format, "Comma separated list of log format options")
flags.BoolVarP(flagSet, &log.Opt.UseSyslog, "syslog", "", log.Opt.UseSyslog, "Use Syslog for logging") flags.BoolVarP(flagSet, &log.Opt.UseSyslog, "syslog", "", log.Opt.UseSyslog, "Use Syslog for logging")
flags.StringVarP(flagSet, &log.Opt.SyslogFacility, "syslog-facility", "", log.Opt.SyslogFacility, "Facility for syslog, e.g. KERN,USER,...") flags.StringVarP(flagSet, &log.Opt.SyslogFacility, "syslog-facility", "", log.Opt.SyslogFacility, "Facility for syslog, e.g. KERN,USER,...")
flags.BoolVarP(flagSet, &log.Opt.LogSystemdSupport, "log-systemd", "", log.Opt.LogSystemdSupport, "Activate systemd integration for the logger.") flags.BoolVarP(flagSet, &log.Opt.LogSystemdSupport, "log-systemd", "", log.Opt.LogSystemdSupport, "Activate systemd integration for the logger")
} }

View File

@ -21,7 +21,7 @@ func init() {
AuthRequired: true, AuthRequired: true,
Fn: rcList, Fn: rcList,
Title: "List the given remote and path in JSON format", Title: "List the given remote and path in JSON format",
Help: `This takes the following parameters Help: `This takes the following parameters:
- fs - a remote name string e.g. "drive:" - fs - a remote name string e.g. "drive:"
- remote - a path within that remote e.g. "dir" - remote - a path within that remote e.g. "dir"
@ -36,7 +36,7 @@ func init() {
- filesOnly - If set only show files - filesOnly - If set only show files
- hashTypes - array of strings of hash types to show if showHash set - hashTypes - array of strings of hash types to show if showHash set
The result is Returns:
- list - list
- This is an array of objects as described in the lsjson command - This is an array of objects as described in the lsjson command
@ -121,7 +121,7 @@ func init() {
AuthRequired: true, AuthRequired: true,
Fn: rcAbout, Fn: rcAbout,
Title: "Return the space used on the remote", Title: "Return the space used on the remote",
Help: `This takes the following parameters Help: `This takes the following parameters:
- fs - a remote name string e.g. "drive:" - fs - a remote name string e.g. "drive:"
@ -167,7 +167,7 @@ func init() {
return rcMoveOrCopyFile(ctx, in, copy) return rcMoveOrCopyFile(ctx, in, copy)
}, },
Title: name + " a file from source remote to destination remote", Title: name + " a file from source remote to destination remote",
Help: `This takes the following parameters Help: `This takes the following parameters:
- srcFs - a remote name string e.g. "drive:" for the source - srcFs - a remote name string e.g. "drive:" for the source
- srcRemote - a path within that remote e.g. "file.txt" for the source - srcRemote - a path within that remote e.g. "file.txt" for the source
@ -202,7 +202,7 @@ func init() {
{name: "mkdir", title: "Make a destination directory or container"}, {name: "mkdir", title: "Make a destination directory or container"},
{name: "rmdir", title: "Remove an empty directory or container"}, {name: "rmdir", title: "Remove an empty directory or container"},
{name: "purge", title: "Remove a directory or container and all of its contents"}, {name: "purge", title: "Remove a directory or container and all of its contents"},
{name: "rmdirs", title: "Remove all the empty directories in the path", help: "- leaveRoot - boolean, set to true not to delete the root\n"}, {name: "rmdirs", title: "Remove all the empty directories in the path", help: "- leaveRoot - boolean, set to true not to delete the root"},
{name: "delete", title: "Remove files in the path", noRemote: true}, {name: "delete", title: "Remove files in the path", noRemote: true},
{name: "deletefile", title: "Remove the single file pointed to"}, {name: "deletefile", title: "Remove the single file pointed to"},
{name: "copyurl", title: "Copy the URL to the object", help: "- url - string, URL to read from\n - autoFilename - boolean, set to true to retrieve destination file name from url"}, {name: "copyurl", title: "Copy the URL to the object", help: "- url - string, URL to read from\n - autoFilename - boolean, set to true to retrieve destination file name from url"},
@ -222,7 +222,7 @@ func init() {
return rcSingleCommand(ctx, in, op.name, op.noRemote) return rcSingleCommand(ctx, in, op.name, op.noRemote)
}, },
Title: op.title, Title: op.title,
Help: `This takes the following parameters Help: `This takes the following parameters:
- fs - a remote name string e.g. "drive:" - fs - a remote name string e.g. "drive:"
` + remote + op.help + ` ` + remote + op.help + `
@ -324,11 +324,11 @@ func init() {
AuthRequired: true, AuthRequired: true,
Fn: rcSize, Fn: rcSize,
Title: "Count the number of bytes and files in remote", Title: "Count the number of bytes and files in remote",
Help: `This takes the following parameters Help: `This takes the following parameters:
- fs - a remote name string e.g. "drive:path/to/dir" - fs - a remote name string e.g. "drive:path/to/dir"
Returns Returns:
- count - number of files - count - number of files
- bytes - number of bytes in those files - bytes - number of bytes in those files
@ -360,14 +360,14 @@ func init() {
AuthRequired: true, AuthRequired: true,
Fn: rcPublicLink, Fn: rcPublicLink,
Title: "Create or retrieve a public link to the given file or folder.", Title: "Create or retrieve a public link to the given file or folder.",
Help: `This takes the following parameters Help: `This takes the following parameters:
- fs - a remote name string e.g. "drive:" - fs - a remote name string e.g. "drive:"
- remote - a path within that remote e.g. "dir" - remote - a path within that remote e.g. "dir"
- unlink - boolean - if set removes the link rather than adding it (optional) - unlink - boolean - if set removes the link rather than adding it (optional)
- expire - string - the expiry time of the link e.g. "1d" (optional) - expire - string - the expiry time of the link e.g. "1d" (optional)
Returns Returns:
- url - URL of the resource - url - URL of the resource
@ -401,7 +401,7 @@ func init() {
Path: "operations/fsinfo", Path: "operations/fsinfo",
Fn: rcFsInfo, Fn: rcFsInfo,
Title: "Return information about the remote", Title: "Return information about the remote",
Help: `This takes the following parameters Help: `This takes the following parameters:
- fs - a remote name string e.g. "drive:" - fs - a remote name string e.g. "drive:"
@ -480,18 +480,18 @@ func init() {
AuthRequired: true, AuthRequired: true,
Fn: rcBackend, Fn: rcBackend,
Title: "Runs a backend command.", Title: "Runs a backend command.",
Help: `This takes the following parameters Help: `This takes the following parameters:
- command - a string with the command name - command - a string with the command name
- fs - a remote name string e.g. "drive:" - fs - a remote name string e.g. "drive:"
- arg - a list of arguments for the backend command - arg - a list of arguments for the backend command
- opt - a map of string to string of options - opt - a map of string to string of options
Returns Returns:
- result - result from the backend command - result - result from the backend command
For example Example:
rclone rc backend/command command=noop fs=. -o echo=yes -o blue -a path1 -a path2 rclone rc backend/command command=noop fs=. -o echo=yes -o blue -a path1 -a path2

View File

@ -34,7 +34,7 @@ func init() {
Path: "options/blocks", Path: "options/blocks",
Fn: rcOptionsBlocks, Fn: rcOptionsBlocks,
Title: "List all the option blocks", Title: "List all the option blocks",
Help: `Returns Help: `Returns:
- options - a list of the options block names`, - options - a list of the options block names`,
}) })
} }
@ -112,7 +112,7 @@ func init() {
Path: "options/set", Path: "options/set",
Fn: rcOptionsSet, Fn: rcOptionsSet,
Title: "Set an option", Title: "Set an option",
Help: `Parameters Help: `Parameters:
- option block name containing an object with - option block name containing an object with
- key: value - key: value

View File

@ -110,10 +110,10 @@ are explained in the go docs: https://golang.org/pkg/runtime/#MemStats
The most interesting values for most people are: The most interesting values for most people are:
* HeapAlloc: This is the amount of memory rclone is actually using - HeapAlloc - this is the amount of memory rclone is actually using
* HeapSys: This is the amount of memory rclone has obtained from the OS - HeapSys - this is the amount of memory rclone has obtained from the OS
* Sys: this is the total amount of memory requested from the OS - Sys - this is the total amount of memory requested from the OS
* It is virtual memory so may include unused memory - It is virtual memory so may include unused memory
`, `,
}) })
} }
@ -171,7 +171,7 @@ func init() {
Fn: rcVersion, Fn: rcVersion,
Title: "Shows the current version of rclone and the go runtime.", Title: "Shows the current version of rclone and the go runtime.",
Help: ` Help: `
This shows the current version of go and the go runtime This shows the current version of go and the go runtime:
- version - rclone version, e.g. "v1.53.0" - version - rclone version, e.g. "v1.53.0"
- decomposed - version number as [major, minor, patch] - decomposed - version number as [major, minor, patch]
@ -217,7 +217,7 @@ func init() {
Pass a clear string and rclone will obscure it for the config file: Pass a clear string and rclone will obscure it for the config file:
- clear - string - clear - string
Returns Returns:
- obscured - string - obscured - string
`, `,
}) })
@ -245,7 +245,7 @@ func init() {
Fn: rcQuit, Fn: rcQuit,
Title: "Terminates the app.", Title: "Terminates the app.",
Help: ` Help: `
(optional) Pass an exit code to be used for terminating the app: (Optional) Pass an exit code to be used for terminating the app:
- exitCode - int - exitCode - int
`, `,
}) })
@ -289,11 +289,11 @@ Once this is set you can look use this to profile the mutex contention:
go tool pprof http://localhost:5572/debug/pprof/mutex go tool pprof http://localhost:5572/debug/pprof/mutex
Parameters Parameters:
- rate - int - rate - int
Results Results:
- previousRate - int - previousRate - int
`, `,
@ -329,7 +329,7 @@ After calling this you can use this to see the blocking profile:
go tool pprof http://localhost:5572/debug/pprof/block go tool pprof http://localhost:5572/debug/pprof/block
Parameters Parameters:
- rate - int - rate - int
`, `,
@ -354,29 +354,29 @@ func init() {
NeedsRequest: true, NeedsRequest: true,
NeedsResponse: true, NeedsResponse: true,
Title: "Run a rclone terminal command over rc.", Title: "Run a rclone terminal command over rc.",
Help: `This takes the following parameters Help: `This takes the following parameters:
- command - a string with the command name - command - a string with the command name.
- arg - a list of arguments for the backend command - arg - a list of arguments for the backend command.
- opt - a map of string to string of options - opt - a map of string to string of options.
- returnType - one of ("COMBINED_OUTPUT", "STREAM", "STREAM_ONLY_STDOUT", "STREAM_ONLY_STDERR") - returnType - one of ("COMBINED_OUTPUT", "STREAM", "STREAM_ONLY_STDOUT", "STREAM_ONLY_STDERR").
- defaults to "COMBINED_OUTPUT" if not set - Defaults to "COMBINED_OUTPUT" if not set.
- the STREAM returnTypes will write the output to the body of the HTTP message - The STREAM returnTypes will write the output to the body of the HTTP message.
- the COMBINED_OUTPUT will write the output to the "result" parameter - The COMBINED_OUTPUT will write the output to the "result" parameter.
Returns Returns:
- result - result from the backend command - result - result from the backend command.
- only set when using returnType "COMBINED_OUTPUT" - Only set when using returnType "COMBINED_OUTPUT".
- error - set if rclone exits with an error code - error - set if rclone exits with an error code.
- returnType - one of ("COMBINED_OUTPUT", "STREAM", "STREAM_ONLY_STDOUT", "STREAM_ONLY_STDERR") - returnType - one of ("COMBINED_OUTPUT", "STREAM", "STREAM_ONLY_STDOUT", "STREAM_ONLY_STDERR").
For example Example:
rclone rc core/command command=ls -a mydrive:/ -o max-depth=1 rclone rc core/command command=ls -a mydrive:/ -o max-depth=1
rclone rc core/command -a ls -a mydrive:/ -o max-depth=1 rclone rc core/command -a ls -a mydrive:/ -o max-depth=1
Returns Returns:
` + "```" + ` ` + "```" + `
{ {

View File

@ -317,11 +317,11 @@ func init() {
Path: "job/status", Path: "job/status",
Fn: rcJobStatus, Fn: rcJobStatus,
Title: "Reads the status of the job ID", Title: "Reads the status of the job ID",
Help: `Parameters Help: `Parameters:
- jobid - id of the job (integer) - jobid - id of the job (integer).
Results Results:
- finished - boolean - finished - boolean
- duration - time in seconds that the job ran for - duration - time in seconds that the job ran for
@ -362,11 +362,11 @@ func init() {
Path: "job/list", Path: "job/list",
Fn: rcJobList, Fn: rcJobList,
Title: "Lists the IDs of the running jobs", Title: "Lists the IDs of the running jobs",
Help: `Parameters - None Help: `Parameters: None.
Results Results:
- jobids - array of integer job ids - jobids - array of integer job ids.
`, `,
}) })
} }
@ -383,9 +383,9 @@ func init() {
Path: "job/stop", Path: "job/stop",
Fn: rcJobStop, Fn: rcJobStop,
Title: "Stop the running job", Title: "Stop the running job",
Help: `Parameters Help: `Parameters:
- jobid - id of the job (integer) - jobid - id of the job (integer).
`, `,
}) })
} }

View File

@ -16,16 +16,16 @@ var (
// AddFlags adds the remote control flags to the flagSet // AddFlags adds the remote control flags to the flagSet
func AddFlags(flagSet *pflag.FlagSet) { func AddFlags(flagSet *pflag.FlagSet) {
rc.AddOption("rc", &Opt) rc.AddOption("rc", &Opt)
flags.BoolVarP(flagSet, &Opt.Enabled, "rc", "", false, "Enable the remote control server.") flags.BoolVarP(flagSet, &Opt.Enabled, "rc", "", false, "Enable the remote control server")
flags.StringVarP(flagSet, &Opt.Files, "rc-files", "", "", "Path to local files to serve on the HTTP server.") flags.StringVarP(flagSet, &Opt.Files, "rc-files", "", "", "Path to local files to serve on the HTTP server")
flags.BoolVarP(flagSet, &Opt.Serve, "rc-serve", "", false, "Enable the serving of remote objects.") flags.BoolVarP(flagSet, &Opt.Serve, "rc-serve", "", false, "Enable the serving of remote objects")
flags.BoolVarP(flagSet, &Opt.NoAuth, "rc-no-auth", "", false, "Don't require auth for certain methods.") flags.BoolVarP(flagSet, &Opt.NoAuth, "rc-no-auth", "", false, "Don't require auth for certain methods")
flags.BoolVarP(flagSet, &Opt.WebUI, "rc-web-gui", "", false, "Launch WebGUI on localhost") flags.BoolVarP(flagSet, &Opt.WebUI, "rc-web-gui", "", false, "Launch WebGUI on localhost")
flags.BoolVarP(flagSet, &Opt.WebGUIUpdate, "rc-web-gui-update", "", false, "Check and update to latest version of web gui") flags.BoolVarP(flagSet, &Opt.WebGUIUpdate, "rc-web-gui-update", "", false, "Check and update to latest version of web gui")
flags.BoolVarP(flagSet, &Opt.WebGUIForceUpdate, "rc-web-gui-force-update", "", false, "Force update to latest version of web gui") flags.BoolVarP(flagSet, &Opt.WebGUIForceUpdate, "rc-web-gui-force-update", "", false, "Force update to latest version of web gui")
flags.BoolVarP(flagSet, &Opt.WebGUINoOpenBrowser, "rc-web-gui-no-open-browser", "", false, "Don't open the browser automatically") flags.BoolVarP(flagSet, &Opt.WebGUINoOpenBrowser, "rc-web-gui-no-open-browser", "", false, "Don't open the browser automatically")
flags.StringVarP(flagSet, &Opt.WebGUIFetchURL, "rc-web-fetch-url", "", "https://api.github.com/repos/rclone/rclone-webui-react/releases/latest", "URL to fetch the releases for webgui.") flags.StringVarP(flagSet, &Opt.WebGUIFetchURL, "rc-web-fetch-url", "", "https://api.github.com/repos/rclone/rclone-webui-react/releases/latest", "URL to fetch the releases for webgui")
flags.StringVarP(flagSet, &Opt.AccessControlAllowOrigin, "rc-allow-origin", "", "", "Set the allowed origin for CORS.") flags.StringVarP(flagSet, &Opt.AccessControlAllowOrigin, "rc-allow-origin", "", "", "Set the allowed origin for CORS")
flags.BoolVarP(flagSet, &Opt.EnableMetrics, "rc-enable-metrics", "", false, "Enable prometheus metrics on /metrics") flags.BoolVarP(flagSet, &Opt.EnableMetrics, "rc-enable-metrics", "", false, "Enable prometheus metrics on /metrics")
flags.DurationVarP(flagSet, &Opt.JobExpireDuration, "rc-job-expire-duration", "", Opt.JobExpireDuration, "expire finished async jobs older than this value") flags.DurationVarP(flagSet, &Opt.JobExpireDuration, "rc-job-expire-duration", "", Opt.JobExpireDuration, "expire finished async jobs older than this value")
flags.DurationVarP(flagSet, &Opt.JobExpireInterval, "rc-job-expire-interval", "", Opt.JobExpireInterval, "interval to check for expired async jobs") flags.DurationVarP(flagSet, &Opt.JobExpireInterval, "rc-job-expire-interval", "", Opt.JobExpireInterval, "interval to check for expired async jobs")

View File

@ -16,13 +16,13 @@ func init() {
AuthRequired: true, AuthRequired: true,
Fn: rcListTestPlugins, Fn: rcListTestPlugins,
Title: "Show currently loaded test plugins", Title: "Show currently loaded test plugins",
Help: `allows listing of test plugins with the rclone.test set to true in package.json of the plugin Help: `Allows listing of test plugins with the rclone.test set to true in package.json of the plugin.
This takes no parameters and returns This takes no parameters and returns:
- loadedTestPlugins: list of currently available test plugins - loadedTestPlugins - list of currently available test plugins.
Eg E.g.
rclone rc pluginsctl/listTestPlugins rclone rc pluginsctl/listTestPlugins
`, `,
@ -45,13 +45,13 @@ func init() {
AuthRequired: true, AuthRequired: true,
Fn: rcRemoveTestPlugin, Fn: rcRemoveTestPlugin,
Title: "Remove a test plugin", Title: "Remove a test plugin",
Help: `This allows you to remove a plugin using it's name Help: `This allows you to remove a plugin using it's name.
This takes the following parameters This takes the following parameters:
- name: name of the plugin in the format ` + "`author`/`plugin_name`" + ` - name - name of the plugin in the format ` + "`author`/`plugin_name`" + `.
Eg Example:
rclone rc pluginsctl/removeTestPlugin name=rclone/rclone-webui-react rclone rc pluginsctl/removeTestPlugin name=rclone/rclone-webui-react
`, `,
@ -79,13 +79,13 @@ func init() {
AuthRequired: true, AuthRequired: true,
Fn: rcAddPlugin, Fn: rcAddPlugin,
Title: "Add a plugin using url", Title: "Add a plugin using url",
Help: `used for adding a plugin to the webgui Help: `Used for adding a plugin to the webgui.
This takes the following parameters This takes the following parameters:
- url: http url of the github repo where the plugin is hosted (http://github.com/rclone/rclone-webui-react) - url - http url of the github repo where the plugin is hosted (http://github.com/rclone/rclone-webui-react).
Eg Example:
rclone rc pluginsctl/addPlugin rclone rc pluginsctl/addPlugin
`, `,
@ -191,12 +191,12 @@ func init() {
Title: "Get the list of currently loaded plugins", Title: "Get the list of currently loaded plugins",
Help: `This allows you to get the currently enabled plugins and their details. Help: `This allows you to get the currently enabled plugins and their details.
This takes no parameters and returns This takes no parameters and returns:
- loadedPlugins: list of current production plugins - loadedPlugins - list of current production plugins.
- testPlugins: list of temporarily loaded development plugins, usually running on a different server. - testPlugins - list of temporarily loaded development plugins, usually running on a different server.
Eg E.g.
rclone rc pluginsctl/listPlugins rclone rc pluginsctl/listPlugins
`, `,
@ -224,13 +224,13 @@ func init() {
AuthRequired: true, AuthRequired: true,
Fn: rcRemovePlugin, Fn: rcRemovePlugin,
Title: "Remove a loaded plugin", Title: "Remove a loaded plugin",
Help: `This allows you to remove a plugin using it's name Help: `This allows you to remove a plugin using it's name.
This takes parameters This takes parameters:
- name: name of the plugin in the format ` + "`author`/`plugin_name`" + ` - name - name of the plugin in the format ` + "`author`/`plugin_name`" + `.
Eg E.g.
rclone rc pluginsctl/removePlugin name=rclone/video-plugin rclone rc pluginsctl/removePlugin name=rclone/video-plugin
`, `,
@ -260,19 +260,19 @@ func init() {
AuthRequired: true, AuthRequired: true,
Fn: rcGetPluginsForType, Fn: rcGetPluginsForType,
Title: "Get plugins with type criteria", Title: "Get plugins with type criteria",
Help: `This shows all possible plugins by a mime type Help: `This shows all possible plugins by a mime type.
This takes the following parameters This takes the following parameters:
- type: supported mime type by a loaded plugin e.g. (video/mp4, audio/mp3) - type - supported mime type by a loaded plugin e.g. (video/mp4, audio/mp3).
- pluginType: filter plugins based on their type e.g. (DASHBOARD, FILE_HANDLER, TERMINAL) - pluginType - filter plugins based on their type e.g. (DASHBOARD, FILE_HANDLER, TERMINAL).
and returns Returns:
- loadedPlugins: list of current production plugins - loadedPlugins - list of current production plugins.
- testPlugins: list of temporarily loaded development plugins, usually running on a different server. - testPlugins - list of temporarily loaded development plugins, usually running on a different server.
Eg Example:
rclone rc pluginsctl/getPluginsForType type=video/mp4 rclone rc pluginsctl/getPluginsForType type=video/mp4
`, `,

View File

@ -20,7 +20,7 @@ func init() {
return rcSyncCopyMove(ctx, in, name) return rcSyncCopyMove(ctx, in, name)
}, },
Title: name + " a directory from source remote to destination remote", Title: name + " a directory from source remote to destination remote",
Help: `This takes the following parameters Help: `This takes the following parameters:
- srcFs - a remote name string e.g. "drive:src" for the source - srcFs - a remote name string e.g. "drive:src" for the source
- dstFs - a remote name string e.g. "drive:dst" for the destination - dstFs - a remote name string e.g. "drive:dst" for the destination

2
lib/env/env.go vendored
View File

@ -9,7 +9,7 @@ import (
) )
// ShellExpandHelp describes what ShellExpand does for inclusion into help // ShellExpandHelp describes what ShellExpand does for inclusion into help
const ShellExpandHelp = "\n\nLeading `~` will be expanded in the file name as will environment variables such as `${RCLONE_CONFIG_DIR}`.\n" const ShellExpandHelp = "\n\nLeading `~` will be expanded in the file name as will environment variables such as `${RCLONE_CONFIG_DIR}`."
// ShellExpand replaces a leading "~" with the home directory" and // ShellExpand replaces a leading "~" with the home directory" and
// expands all environment variables afterwards. // expands all environment variables afterwards.

View File

@ -70,10 +70,10 @@ var (
// AddFlagsPrefix adds flags for http/auth // AddFlagsPrefix adds flags for http/auth
func AddFlagsPrefix(flagSet *pflag.FlagSet, prefix string, Opt *Options) { func AddFlagsPrefix(flagSet *pflag.FlagSet, prefix string, Opt *Options) {
flags.StringVarP(flagSet, &Opt.HtPasswd, prefix+"htpasswd", "", Opt.HtPasswd, "htpasswd file - if not provided no authentication is done") flags.StringVarP(flagSet, &Opt.HtPasswd, prefix+"htpasswd", "", Opt.HtPasswd, "A htpasswd file - if not provided no authentication is done")
flags.StringVarP(flagSet, &Opt.Realm, prefix+"realm", "", Opt.Realm, "realm for authentication") flags.StringVarP(flagSet, &Opt.Realm, prefix+"realm", "", Opt.Realm, "Realm for authentication")
flags.StringVarP(flagSet, &Opt.BasicUser, prefix+"user", "", Opt.BasicUser, "User name for authentication.") flags.StringVarP(flagSet, &Opt.BasicUser, prefix+"user", "", Opt.BasicUser, "User name for authentication")
flags.StringVarP(flagSet, &Opt.BasicPass, prefix+"pass", "", Opt.BasicPass, "Password for authentication.") flags.StringVarP(flagSet, &Opt.BasicPass, prefix+"pass", "", Opt.BasicPass, "Password for authentication")
flags.StringVarP(flagSet, &Opt.Salt, prefix+"salt", "", Opt.Salt, "Password hashing salt") flags.StringVarP(flagSet, &Opt.Salt, prefix+"salt", "", Opt.Salt, "Password hashing salt")
} }

View File

@ -378,14 +378,14 @@ func URL() string {
// AddFlagsPrefix adds flags for the httplib // AddFlagsPrefix adds flags for the httplib
func AddFlagsPrefix(flagSet *pflag.FlagSet, prefix string, Opt *Options) { func AddFlagsPrefix(flagSet *pflag.FlagSet, prefix string, Opt *Options) {
flags.StringVarP(flagSet, &Opt.ListenAddr, prefix+"addr", "", Opt.ListenAddr, "IPaddress:Port or :Port to bind server to.") flags.StringVarP(flagSet, &Opt.ListenAddr, prefix+"addr", "", Opt.ListenAddr, "IPaddress:Port or :Port to bind server to")
flags.DurationVarP(flagSet, &Opt.ServerReadTimeout, prefix+"server-read-timeout", "", Opt.ServerReadTimeout, "Timeout for server reading data") flags.DurationVarP(flagSet, &Opt.ServerReadTimeout, prefix+"server-read-timeout", "", Opt.ServerReadTimeout, "Timeout for server reading data")
flags.DurationVarP(flagSet, &Opt.ServerWriteTimeout, prefix+"server-write-timeout", "", Opt.ServerWriteTimeout, "Timeout for server writing data") flags.DurationVarP(flagSet, &Opt.ServerWriteTimeout, prefix+"server-write-timeout", "", Opt.ServerWriteTimeout, "Timeout for server writing data")
flags.IntVarP(flagSet, &Opt.MaxHeaderBytes, prefix+"max-header-bytes", "", Opt.MaxHeaderBytes, "Maximum size of request header") flags.IntVarP(flagSet, &Opt.MaxHeaderBytes, prefix+"max-header-bytes", "", Opt.MaxHeaderBytes, "Maximum size of request header")
flags.StringVarP(flagSet, &Opt.SslCert, prefix+"cert", "", Opt.SslCert, "SSL PEM key (concatenation of certificate and CA certificate)") flags.StringVarP(flagSet, &Opt.SslCert, prefix+"cert", "", Opt.SslCert, "SSL PEM key (concatenation of certificate and CA certificate)")
flags.StringVarP(flagSet, &Opt.SslKey, prefix+"key", "", Opt.SslKey, "SSL PEM Private key") flags.StringVarP(flagSet, &Opt.SslKey, prefix+"key", "", Opt.SslKey, "SSL PEM Private key")
flags.StringVarP(flagSet, &Opt.ClientCA, prefix+"client-ca", "", Opt.ClientCA, "Client certificate authority to verify clients with") flags.StringVarP(flagSet, &Opt.ClientCA, prefix+"client-ca", "", Opt.ClientCA, "Client certificate authority to verify clients with")
flags.StringVarP(flagSet, &Opt.BaseURL, prefix+"baseurl", "", Opt.BaseURL, "Prefix for URLs - leave blank for root.") flags.StringVarP(flagSet, &Opt.BaseURL, prefix+"baseurl", "", Opt.BaseURL, "Prefix for URLs - leave blank for root")
} }

View File

@ -76,21 +76,21 @@ All done. Please go back to rclone.
// SharedOptions are shared between backends the utilize an OAuth flow // SharedOptions are shared between backends the utilize an OAuth flow
var SharedOptions = []fs.Option{{ var SharedOptions = []fs.Option{{
Name: config.ConfigClientID, Name: config.ConfigClientID,
Help: "OAuth Client Id\nLeave blank normally.", Help: "OAuth Client Id.\n\nLeave blank normally.",
}, { }, {
Name: config.ConfigClientSecret, Name: config.ConfigClientSecret,
Help: "OAuth Client Secret\nLeave blank normally.", Help: "OAuth Client Secret.\n\nLeave blank normally.",
}, { }, {
Name: config.ConfigToken, Name: config.ConfigToken,
Help: "OAuth Access Token as a JSON blob.", Help: "OAuth Access Token as a JSON blob.",
Advanced: true, Advanced: true,
}, { }, {
Name: config.ConfigAuthURL, Name: config.ConfigAuthURL,
Help: "Auth server URL.\nLeave blank to use the provider defaults.", Help: "Auth server URL.\n\nLeave blank to use the provider defaults.",
Advanced: true, Advanced: true,
}, { }, {
Name: config.ConfigTokenURL, Name: config.ConfigTokenURL,
Help: "Token server url.\nLeave blank to use the provider defaults.", Help: "Token server url.\n\nLeave blank to use the provider defaults.",
Advanced: true, Advanced: true,
}} }}

View File

@ -30,8 +30,8 @@ directory should be considered up to date and not refreshed from the
backend. Changes made through the mount will appear immediately or backend. Changes made through the mount will appear immediately or
invalidate the cache. invalidate the cache.
--dir-cache-time duration Time to cache directory entries for. (default 5m0s) --dir-cache-time duration Time to cache directory entries for (default 5m0s)
--poll-interval duration Time to wait between polling for changes. Must be smaller than dir-cache-time. Only on supported remotes. Set to 0 to disable. (default 1m0s) --poll-interval duration Time to wait between polling for changes. Must be smaller than dir-cache-time. Only on supported remotes. Set to 0 to disable (default 1m0s)
However, changes made directly on the cloud storage by the web However, changes made directly on the cloud storage by the web
interface or a different copy of rclone will only be picked up once interface or a different copy of rclone will only be picked up once
@ -85,10 +85,10 @@ find that you need one or the other or both.
--cache-dir string Directory rclone will use for caching. --cache-dir string Directory rclone will use for caching.
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off) --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-max-age duration Max age of objects in the cache. (default 1h0m0s) --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache. (default off) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects. (default 1m0s) --vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
--vfs-write-back duration Time to writeback files after last use when using cache. (default 5s) --vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
If run with !-vv! rclone will print the location of the file cache. The If run with !-vv! rclone will print the location of the file cache. The
files are stored in the user cache file area which is OS dependent but files are stored in the user cache file area which is OS dependent but
@ -231,14 +231,14 @@ than seeking rclone will wait a short time for the in sequence read or
write to come in. These flags only come into effect when not using an write to come in. These flags only come into effect when not using an
on disk cache file. on disk cache file.
--vfs-read-wait duration Time to wait for in-sequence read before seeking. (default 20ms) --vfs-read-wait duration Time to wait for in-sequence read before seeking (default 20ms)
--vfs-write-wait duration Time to wait for in-sequence write before giving error. (default 1s) --vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s)
When using VFS write caching (!--vfs-cache-mode! with value writes or full), When using VFS write caching (!--vfs-cache-mode! with value writes or full),
the global flag !--transfers! can be set to adjust the number of parallel uploads of the global flag !--transfers! can be set to adjust the number of parallel uploads of
modified files from cache (the related global flag !--checkers! have no effect on mount). modified files from cache (the related global flag !--checkers! have no effect on mount).
--transfers int Number of file transfers to run in parallel. (default 4) --transfers int Number of file transfers to run in parallel (default 4)
### VFS Case Sensitivity ### VFS Case Sensitivity

View File

@ -18,25 +18,25 @@ var (
// AddFlags adds the non filing system specific flags to the command // AddFlags adds the non filing system specific flags to the command
func AddFlags(flagSet *pflag.FlagSet) { func AddFlags(flagSet *pflag.FlagSet) {
rc.AddOption("vfs", &Opt) rc.AddOption("vfs", &Opt)
flags.BoolVarP(flagSet, &Opt.NoModTime, "no-modtime", "", Opt.NoModTime, "Don't read/write the modification time (can speed things up).") flags.BoolVarP(flagSet, &Opt.NoModTime, "no-modtime", "", Opt.NoModTime, "Don't read/write the modification time (can speed things up)")
flags.BoolVarP(flagSet, &Opt.NoChecksum, "no-checksum", "", Opt.NoChecksum, "Don't compare checksums on up/download.") flags.BoolVarP(flagSet, &Opt.NoChecksum, "no-checksum", "", Opt.NoChecksum, "Don't compare checksums on up/download")
flags.BoolVarP(flagSet, &Opt.NoSeek, "no-seek", "", Opt.NoSeek, "Don't allow seeking in files.") flags.BoolVarP(flagSet, &Opt.NoSeek, "no-seek", "", Opt.NoSeek, "Don't allow seeking in files")
flags.DurationVarP(flagSet, &Opt.DirCacheTime, "dir-cache-time", "", Opt.DirCacheTime, "Time to cache directory entries for.") flags.DurationVarP(flagSet, &Opt.DirCacheTime, "dir-cache-time", "", Opt.DirCacheTime, "Time to cache directory entries for")
flags.DurationVarP(flagSet, &Opt.PollInterval, "poll-interval", "", Opt.PollInterval, "Time to wait between polling for changes. Must be smaller than dir-cache-time. Only on supported remotes. Set to 0 to disable.") flags.DurationVarP(flagSet, &Opt.PollInterval, "poll-interval", "", Opt.PollInterval, "Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable)")
flags.BoolVarP(flagSet, &Opt.ReadOnly, "read-only", "", Opt.ReadOnly, "Mount read-only.") flags.BoolVarP(flagSet, &Opt.ReadOnly, "read-only", "", Opt.ReadOnly, "Mount read-only")
flags.FVarP(flagSet, &Opt.CacheMode, "vfs-cache-mode", "", "Cache mode off|minimal|writes|full") flags.FVarP(flagSet, &Opt.CacheMode, "vfs-cache-mode", "", "Cache mode off|minimal|writes|full")
flags.DurationVarP(flagSet, &Opt.CachePollInterval, "vfs-cache-poll-interval", "", Opt.CachePollInterval, "Interval to poll the cache for stale objects.") flags.DurationVarP(flagSet, &Opt.CachePollInterval, "vfs-cache-poll-interval", "", Opt.CachePollInterval, "Interval to poll the cache for stale objects")
flags.DurationVarP(flagSet, &Opt.CacheMaxAge, "vfs-cache-max-age", "", Opt.CacheMaxAge, "Max age of objects in the cache.") flags.DurationVarP(flagSet, &Opt.CacheMaxAge, "vfs-cache-max-age", "", Opt.CacheMaxAge, "Max age of objects in the cache")
flags.FVarP(flagSet, &Opt.CacheMaxSize, "vfs-cache-max-size", "", "Max total size of objects in the cache.") flags.FVarP(flagSet, &Opt.CacheMaxSize, "vfs-cache-max-size", "", "Max total size of objects in the cache")
flags.FVarP(flagSet, &Opt.ChunkSize, "vfs-read-chunk-size", "", "Read the source objects in chunks.") flags.FVarP(flagSet, &Opt.ChunkSize, "vfs-read-chunk-size", "", "Read the source objects in chunks")
flags.FVarP(flagSet, &Opt.ChunkSizeLimit, "vfs-read-chunk-size-limit", "", "If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached. 'off' is unlimited.") flags.FVarP(flagSet, &Opt.ChunkSizeLimit, "vfs-read-chunk-size-limit", "", "If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited)")
flags.FVarP(flagSet, DirPerms, "dir-perms", "", "Directory permissions") flags.FVarP(flagSet, DirPerms, "dir-perms", "", "Directory permissions")
flags.FVarP(flagSet, FilePerms, "file-perms", "", "File permissions") flags.FVarP(flagSet, FilePerms, "file-perms", "", "File permissions")
flags.BoolVarP(flagSet, &Opt.CaseInsensitive, "vfs-case-insensitive", "", Opt.CaseInsensitive, "If a file name not found, find a case insensitive match.") flags.BoolVarP(flagSet, &Opt.CaseInsensitive, "vfs-case-insensitive", "", Opt.CaseInsensitive, "If a file name not found, find a case insensitive match")
flags.DurationVarP(flagSet, &Opt.WriteWait, "vfs-write-wait", "", Opt.WriteWait, "Time to wait for in-sequence write before giving error.") flags.DurationVarP(flagSet, &Opt.WriteWait, "vfs-write-wait", "", Opt.WriteWait, "Time to wait for in-sequence write before giving error")
flags.DurationVarP(flagSet, &Opt.ReadWait, "vfs-read-wait", "", Opt.ReadWait, "Time to wait for in-sequence read before seeking.") flags.DurationVarP(flagSet, &Opt.ReadWait, "vfs-read-wait", "", Opt.ReadWait, "Time to wait for in-sequence read before seeking")
flags.DurationVarP(flagSet, &Opt.WriteBack, "vfs-write-back", "", Opt.WriteBack, "Time to writeback files after last use when using cache.") flags.DurationVarP(flagSet, &Opt.WriteBack, "vfs-write-back", "", Opt.WriteBack, "Time to writeback files after last use when using cache")
flags.FVarP(flagSet, &Opt.ReadAhead, "vfs-read-ahead", "", "Extra read ahead over --buffer-size when using cache-mode full.") flags.FVarP(flagSet, &Opt.ReadAhead, "vfs-read-ahead", "", "Extra read ahead over --buffer-size when using cache-mode full")
flags.BoolVarP(flagSet, &Opt.UsedIsSize, "vfs-used-is-size", "", Opt.UsedIsSize, "Use the `rclone size` algorithm for Used size.") flags.BoolVarP(flagSet, &Opt.UsedIsSize, "vfs-used-is-size", "", Opt.UsedIsSize, "Use the `rclone size` algorithm for Used size")
platformFlags(flagSet) platformFlags(flagSet)
} }

View File

@ -13,9 +13,9 @@ import (
func platformFlags(flagSet *pflag.FlagSet) { func platformFlags(flagSet *pflag.FlagSet) {
Opt.Umask = unix.Umask(0) // read the umask Opt.Umask = unix.Umask(0) // read the umask
unix.Umask(Opt.Umask) // set it back to what it was unix.Umask(Opt.Umask) // set it back to what it was
flags.IntVarP(flagSet, &Opt.Umask, "umask", "", Opt.Umask, "Override the permission bits set by the filesystem. Not supported on Windows.") flags.IntVarP(flagSet, &Opt.Umask, "umask", "", Opt.Umask, "Override the permission bits set by the filesystem (not supported on Windows)")
Opt.UID = uint32(unix.Geteuid()) Opt.UID = uint32(unix.Geteuid())
Opt.GID = uint32(unix.Getegid()) Opt.GID = uint32(unix.Getegid())
flags.Uint32VarP(flagSet, &Opt.UID, "uid", "", Opt.UID, "Override the uid field set by the filesystem. Not supported on Windows.") flags.Uint32VarP(flagSet, &Opt.UID, "uid", "", Opt.UID, "Override the uid field set by the filesystem (not supported on Windows)")
flags.Uint32VarP(flagSet, &Opt.GID, "gid", "", Opt.GID, "Override the gid field set by the filesystem. Not supported on Windows.") flags.Uint32VarP(flagSet, &Opt.GID, "gid", "", Opt.GID, "Override the gid field set by the filesystem (not supported on Windows)")
} }