From a9273c5da5fbf32aaa4d40c9a2f6af39bc7edffb Mon Sep 17 00:00:00 2001 From: Nick Craig-Wood Date: Mon, 1 Oct 2018 18:36:15 +0100 Subject: [PATCH] docs: move documentation for options from docs/content into backends In the following commit, the documentation will be autogenerated. --- backend/amazonclouddrive/amazonclouddrive.go | 37 +++- backend/azureblob/azureblob.go | 35 +++- backend/b2/b2.go | 33 +++- backend/box/box.go | 2 +- backend/cache/cache.go | 165 +++++++++++++----- backend/crypt/crypt.go | 12 +- backend/drive/drive.go | 81 ++++++--- backend/dropbox/dropbox.go | 11 +- backend/jottacloud/jottacloud.go | 2 +- backend/local/local.go | 26 ++- backend/mega/mega.go | 15 +- backend/onedrive/onedrive.go | 17 +- backend/s3/s3.go | 37 +++- backend/sftp/sftp.go | 17 +- backend/swift/swift.go | 16 +- backend/union/union.go | 2 +- docs/content/alias.md | 2 + docs/content/amazonclouddrive.md | 39 +---- docs/content/azureblob.md | 35 +--- docs/content/b2.md | 46 +---- docs/content/box.md | 15 +- docs/content/cache.md | 167 +------------------ docs/content/crypt.md | 16 +- docs/content/drive.md | 101 +---------- docs/content/dropbox.md | 16 +- docs/content/ftp.md | 3 + docs/content/googlecloudstorage.md | 3 + docs/content/http.md | 3 + docs/content/hubic.md | 3 + docs/content/jottacloud.md | 24 +-- docs/content/local.md | 44 ++--- docs/content/mega.md | 18 +- docs/content/onedrive.md | 18 +- docs/content/opendrive.md | 9 +- docs/content/pcloud.md | 3 + docs/content/qingstor.md | 2 + docs/content/s3.md | 52 +----- docs/content/sftp.md | 25 +-- docs/content/swift.md | 16 +- docs/content/union.md | 2 + docs/content/webdav.md | 3 + docs/content/yandex.md | 3 + 42 files changed, 474 insertions(+), 702 deletions(-) diff --git a/backend/amazonclouddrive/amazonclouddrive.go b/backend/amazonclouddrive/amazonclouddrive.go index d5932fae4..152201604 100644 --- a/backend/amazonclouddrive/amazonclouddrive.go +++ b/backend/amazonclouddrive/amazonclouddrive.go @@ -97,13 +97,42 @@ 
func init() { Hide: fs.OptionHideBoth, Advanced: true, }, { - Name: "upload_wait_per_gb", - Help: "Additional time per GB to wait after a failed complete upload to see if it appears.", + Name: "upload_wait_per_gb", + Help: `Additional time per GB to wait after a failed complete upload to see if it appears. + +Sometimes Amazon Drive gives an error when a file has been fully +uploaded but the file appears anyway after a little while. This +happens sometimes for files over 1GB in size and nearly every time for +files bigger than 10GB. This parameter controls the time rclone waits +for the file to appear. + +The default value for this parameter is 3 minutes per GB, so by +default it will wait 3 minutes for every GB uploaded to see if the +file appears. + +You can disable this feature by setting it to 0. This may cause +conflict errors as rclone retries the failed upload but the file will +most likely appear correctly eventually. + +These values were determined empirically by observing lots of uploads +of big files for a range of file sizes. + +Upload with the "-v" flag to see more info about what rclone is doing +in this situation.`, Default: fs.Duration(180 * time.Second), Advanced: true, }, { - Name: "templink_threshold", - Help: "Files >= this size will be downloaded via their tempLink.", + Name: "templink_threshold", + Help: `Files >= this size will be downloaded via their tempLink. + +Files this size or more will be downloaded via their "tempLink". This +is to work around a problem with Amazon Drive which blocks downloads +of files bigger than about 10GB. The default for this is 9GB which +shouldn't need to be changed. 
+ +To download files above this threshold, rclone requests a "tempLink" +which downloads the file through a temporary URL directly from the +underlying S3 storage.`, Default: defaultTempLinkThreshold, Advanced: true, }}, diff --git a/backend/azureblob/azureblob.go b/backend/azureblob/azureblob.go index 192a5236d..bb504ca1d 100644 --- a/backend/azureblob/azureblob.go +++ b/backend/azureblob/azureblob.go @@ -73,23 +73,44 @@ func init() { Advanced: true, }, { Name: "upload_cutoff", - Help: "Cutoff for switching to chunked upload.", + Help: "Cutoff for switching to chunked upload (<= 256MB).", Default: fs.SizeSuffix(defaultUploadCutoff), Advanced: true, }, { - Name: "chunk_size", - Help: "Upload chunk size. Must fit in memory.", + Name: "chunk_size", + Help: `Upload chunk size (<= 100MB). + +Note that this is stored in memory and there may be up to +"--transfers" chunks stored at once in memory.`, Default: fs.SizeSuffix(defaultChunkSize), Advanced: true, }, { - Name: "list_chunk", - Help: "Size of blob list.", + Name: "list_chunk", + Help: `Size of blob list. + +This sets the number of blobs requested in each listing chunk. Default +is the maximum, 5000. "List blobs" requests are permitted 2 minutes +per megabyte to complete. If an operation is taking longer than 2 +minutes per megabyte on average, it will time out ( +[source](https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-blob-service-operations#exceptions-to-default-timeout-interval) +). This can be used to limit the number of blobs items to return, to +avoid the time out.`, Default: maxListChunkSize, Advanced: true, }, { Name: "access_tier", - Help: "Access tier of blob, supports hot, cool and archive tiers.\nArchived blobs can be restored by setting access tier to hot or cool." + - " Leave blank if you intend to use default access tier, which is set at account level", + Help: `Access tier of blob: hot, cool or archive. 
+ +Archived blobs can be restored by setting access tier to hot or +cool. Leave blank if you intend to use default access tier, which is +set at account level + +If there is no "access tier" specified, rclone doesn't apply any tier. +rclone performs "Set Tier" operation on blobs while uploading, if objects +are not modified, specifying "access tier" to new one will have no effect. +If blobs are in "archive tier" at remote, trying to perform data transfer +operations from remote will not be allowed. User should first restore by +tiering blob to "Hot" or "Cool".`, Advanced: true, }}, }) diff --git a/backend/b2/b2.go b/backend/b2/b2.go index ab9c6db8c..cde1e5f6b 100644 --- a/backend/b2/b2.go +++ b/backend/b2/b2.go @@ -77,14 +77,24 @@ func init() { Help: "Endpoint for the service.\nLeave blank normally.", Advanced: true, }, { - Name: "test_mode", - Help: "A flag string for X-Bz-Test-Mode header for debugging.", + Name: "test_mode", + Help: `A flag string for X-Bz-Test-Mode header for debugging. + +This is for debugging purposes only. 
Setting it to one of the strings +below will cause b2 to return specific errors: + + * "fail_some_uploads" + * "expire_some_account_authorization_tokens" + * "force_cap_exceeded" + +These will be set in the "X-Bz-Test-Mode" header which is documented +in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration_checklist.html).`, Default: "", Hide: fs.OptionHideConfigurator, Advanced: true, }, { Name: "versions", - Help: "Include old versions in directory listings.", + Help: "Include old versions in directory listings.\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.", Default: false, Advanced: true, }, { @@ -92,13 +102,22 @@ func init() { Help: "Permanently delete files on remote removal, otherwise hide files.", Default: false, }, { - Name: "upload_cutoff", - Help: "Cutoff for switching to chunked upload.", + Name: "upload_cutoff", + Help: `Cutoff for switching to chunked upload. + +Files above this size will be uploaded in chunks of "--b2-chunk-size". + +This value should be set no larger than 4.657GiB (== 5GB).`, Default: fs.SizeSuffix(defaultUploadCutoff), Advanced: true, }, { - Name: "chunk_size", - Help: "Upload chunk size. Must fit in memory.", + Name: "chunk_size", + Help: `Upload chunk size. Must fit in memory. + +When uploading large files, chunk the file into this size. Note that +these chunks are buffered in memory and there might a maximum of +"--transfers" chunks in progress at once. 
5,000,000 Bytes is the +minimim size.`, Default: fs.SizeSuffix(defaultChunkSize), Advanced: true, }}, diff --git a/backend/box/box.go b/backend/box/box.go index c6e9b5fc8..f814e1eea 100644 --- a/backend/box/box.go +++ b/backend/box/box.go @@ -85,7 +85,7 @@ func init() { Help: "Box App Client Secret\nLeave blank normally.", }, { Name: "upload_cutoff", - Help: "Cutoff for switching to multipart upload.", + Help: "Cutoff for switching to multipart upload (>= 50MB).", Default: fs.SizeSuffix(defaultUploadCutoff), Advanced: true, }, { diff --git a/backend/cache/cache.go b/backend/cache/cache.go index 2caaf4069..7ce38d7ea 100644 --- a/backend/cache/cache.go +++ b/backend/cache/cache.go @@ -87,8 +87,12 @@ func init() { Help: "Skip all certificate verifications when connecting to the Plex server", Advanced: true, }, { - Name: "chunk_size", - Help: "The size of a chunk. Lower value good for slow connections but can affect seamless reading.", + Name: "chunk_size", + Help: `The size of a chunk (partial file data). + +Use lower numbers for slower connections. If the chunk size is +changed, any downloaded chunks will be invalid and cache-chunk-path +will need to be cleared or unexpected EOF errors will occur.`, Default: DefCacheChunkSize, Examples: []fs.OptionExample{{ Value: "1m", @@ -101,8 +105,10 @@ func init() { Help: "10 MB", }}, }, { - Name: "info_age", - Help: "How much time should object info (file size, file hashes etc) be stored in cache.\nUse a very high value if you don't plan on changing the source FS from outside the cache.\nAccepted units are: \"s\", \"m\", \"h\".", + Name: "info_age", + Help: `How long to cache file structure information (directory listings, file size, times etc). 
+If all write operations are done through the cache then you can safely make +this value very large as the cache store will also be updated in real time.`, Default: DefCacheInfoAge, Examples: []fs.OptionExample{{ Value: "1h", @@ -115,8 +121,11 @@ func init() { Help: "48 hours", }}, }, { - Name: "chunk_total_size", - Help: "The maximum size of stored chunks. When the storage grows beyond this size, the oldest chunks will be deleted.", + Name: "chunk_total_size", + Help: `The total size that the chunks can take up on the local disk. + +If the cache exceeds this value then it will start to delete the +oldest chunks until it goes under this value.`, Default: DefCacheTotalChunkSize, Examples: []fs.OptionExample{{ Value: "500M", @@ -131,63 +140,143 @@ func init() { }, { Name: "db_path", Default: filepath.Join(config.CacheDir, "cache-backend"), - Help: "Directory to cache DB", + Help: "Directory to store file structure metadata DB.\nThe remote name is used as the DB file name.", Advanced: true, }, { - Name: "chunk_path", - Default: filepath.Join(config.CacheDir, "cache-backend"), - Help: "Directory to cache chunk files", + Name: "chunk_path", + Default: filepath.Join(config.CacheDir, "cache-backend"), + Help: `Directory to cache chunk files. + +Path to where partial file data (chunks) are stored locally. The remote +name is appended to the final path. + +This config follows the "--cache-db-path". 
If you specify a custom +location for "--cache-db-path" and don't specify one for "--cache-chunk-path" +then "--cache-chunk-path" will use the same path as "--cache-db-path".`, Advanced: true, }, { Name: "db_purge", Default: false, - Help: "Purge the cache DB before", + Help: "Clear all the cached data for this remote on start.", Hide: fs.OptionHideConfigurator, Advanced: true, }, { - Name: "chunk_clean_interval", - Default: DefCacheChunkCleanInterval, - Help: "Interval at which chunk cleanup runs", + Name: "chunk_clean_interval", + Default: DefCacheChunkCleanInterval, + Help: `How often should the cache perform cleanups of the chunk storage. +The default value should be ok for most people. If you find that the +cache goes over "cache-chunk-total-size" too often then try to lower +this value to force it to perform cleanups more often.`, Advanced: true, }, { - Name: "read_retries", - Default: DefCacheReadRetries, - Help: "How many times to retry a read from a cache storage", + Name: "read_retries", + Default: DefCacheReadRetries, + Help: `How many times to retry a read from a cache storage. + +Since reading from a cache stream is independent from downloading file +data, readers can get to a point where there's no more data in the +cache. Most of the times this can indicate a connectivity issue if +cache isn't able to provide file data anymore. + +For really slow connections, increase this to a point where the stream is +able to provide data but your experience will be very stuttering.`, Advanced: true, }, { - Name: "workers", - Default: DefCacheTotalWorkers, - Help: "How many workers should run in parallel to download chunks", + Name: "workers", + Default: DefCacheTotalWorkers, + Help: `How many workers should run in parallel to download chunks. + +Higher values will mean more parallel processing (better CPU needed) +and more concurrent requests on the cloud provider. 
This impacts +several aspects like the cloud provider API limits, more stress on the +hardware that rclone runs on but it also means that streams will be +more fluid and data will be available much more faster to readers. + +**Note**: If the optional Plex integration is enabled then this +setting will adapt to the type of reading performed and the value +specified here will be used as a maximum number of workers to use.`, Advanced: true, }, { - Name: "chunk_no_memory", - Default: DefCacheChunkNoMemory, - Help: "Disable the in-memory cache for storing chunks during streaming", + Name: "chunk_no_memory", + Default: DefCacheChunkNoMemory, + Help: `Disable the in-memory cache for storing chunks during streaming. + +By default, cache will keep file data during streaming in RAM as well +to provide it to readers as fast as possible. + +This transient data is evicted as soon as it is read and the number of +chunks stored doesn't exceed the number of workers. However, depending +on other settings like "cache-chunk-size" and "cache-workers" this footprint +can increase if there are parallel streams too (multiple files being read +at the same time). + +If the hardware permits it, use this feature to provide an overall better +performance during streaming but it can also be disabled if RAM is not +available on the local machine.`, Advanced: true, }, { - Name: "rps", - Default: int(DefCacheRps), - Help: "Limits the number of requests per second to the source FS. -1 disables the rate limiter", + Name: "rps", + Default: int(DefCacheRps), + Help: `Limits the number of requests per second to the source FS (-1 to disable) + +This setting places a hard limit on the number of requests per second +that cache will be doing to the cloud provider remote and try to +respect that value by setting waits between reads. 
+ +If you find that you're getting banned or limited on the cloud +provider through cache and know that a smaller number of requests per +second will allow you to work with it then you can use this setting +for that. + +A good balance of all the other settings should make this setting +useless but it is available to set for more special cases. + +**NOTE**: This will limit the number of requests during streams but +other API calls to the cloud provider like directory listings will +still pass.`, Advanced: true, }, { - Name: "writes", - Default: DefCacheWrites, - Help: "Will cache file data on writes through the FS", + Name: "writes", + Default: DefCacheWrites, + Help: `Cache file data on writes through the FS + +If you need to read files immediately after you upload them through +cache you can enable this flag to have their data stored in the +cache store at the same time during upload.`, Advanced: true, }, { - Name: "tmp_upload_path", - Default: "", - Help: "Directory to keep temporary files until they are uploaded to the cloud storage", + Name: "tmp_upload_path", + Default: "", + Help: `Directory to keep temporary files until they are uploaded. + +This is the path where cache will use as a temporary storage for new +files that need to be uploaded to the cloud provider. + +Specifying a value will enable this feature. Without it, it is +completely disabled and files will be uploaded directly to the cloud +provider`, Advanced: true, }, { - Name: "tmp_wait_time", - Default: DefCacheTmpWaitTime, - Help: "How long should files be stored in local cache before being uploaded", + Name: "tmp_wait_time", + Default: DefCacheTmpWaitTime, + Help: `How long should files be stored in local cache before being uploaded + +This is the duration that a file must wait in the temporary location +_cache-tmp-upload-path_ before it is selected for upload. 
+ +Note that only one file is uploaded at a time and it can take longer +to start the upload if a queue formed for this purpose.`, Advanced: true, }, { - Name: "db_wait_time", - Default: DefCacheDbWaitTime, - Help: "How long to wait for the DB to be available - 0 is unlimited", + Name: "db_wait_time", + Default: DefCacheDbWaitTime, + Help: `How long to wait for the DB to be available - 0 is unlimited + +Only one process can have the DB open at any one time, so rclone waits +for this duration for the DB to become available before it gives an +error. + +If you set it to 0 then it will wait forever.`, Advanced: true, }}, }) diff --git a/backend/crypt/crypt.go b/backend/crypt/crypt.go index f6de2214d..363d153f4 100644 --- a/backend/crypt/crypt.go +++ b/backend/crypt/crypt.go @@ -67,8 +67,16 @@ func init() { Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.", IsPassword: true, }, { - Name: "show_mapping", - Help: "For all files listed show how the names encrypt.", + Name: "show_mapping", + Help: `For all files listed show how the names encrypt. + +If this flag is set then for each file that the remote is asked to +list, it will log (at level INFO) a line stating the decrypted file +name and the encrypted file name. 
+ +This is so you can work out which encrypted names are which decrypted +names just in case you need to do something with the encrypted file +names, or for debugging purposes.`, Default: false, Hide: fs.OptionHideConfigurator, Advanced: true, diff --git a/backend/drive/drive.go b/backend/drive/drive.go index e06241820..4a661faa8 100644 --- a/backend/drive/drive.go +++ b/backend/drive/drive.go @@ -207,22 +207,29 @@ func init() { }, { Name: "use_trash", Default: true, - Help: "Send files to the trash instead of deleting permanently.", + Help: "Send files to the trash instead of deleting permanently.\nDefaults to true, namely sending files to the trash.\nUse `--drive-use-trash=false` to delete files permanently instead.", Advanced: true, }, { Name: "skip_gdocs", Default: false, - Help: "Skip google documents in all listings.", + Help: "Skip google documents in all listings.\nIf given, gdocs practically become invisible to rclone.", Advanced: true, }, { - Name: "shared_with_me", - Default: false, - Help: "Only show files that are shared with me", + Name: "shared_with_me", + Default: false, + Help: `Only show files that are shared with me. + +Instructs rclone to operate on your "Shared with me" folder (where +Google Drive lets you access the files and folders others have shared +with you). + +This works both with the "list" (lsd, lsl, etc) and the "copy" +commands (copy, sync, etc), and with all other commands too.`, Advanced: true, }, { Name: "trashed_only", Default: false, - Help: "Only show files that are in the trash", + Help: "Only show files that are in the trash.\nThis will show trashed files in their original directory structure.", Advanced: true, }, { Name: "formats", @@ -246,9 +253,25 @@ func init() { Help: "Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). 
This will confuse sync and reupload every time.", Advanced: true, }, { - Name: "use_created_date", - Default: false, - Help: "Use created date instead of modified date.", + Name: "use_created_date", + Default: false, + Help: `Use file created date instead of modified date., + +Useful when downloading data and you want the creation date used in +place of the last modified date. + +**WARNING**: This flag may have some unexpected consequences. + +When uploading to your drive all files will be overwritten unless they +haven't been modified since their creation. And the inverse will occur +while downloading. This side effect can be avoided by using the +"--checksum" flag. + +This feature was implemented to retain photos capture date as recorded +by google photos. You will first need to check the "Create a Google +Photos folder" option in your google drive settings. You can then copy +or move the photos locally and use the date the image was taken +(created) set as the modification date.`, Advanced: true, }, { Name: "list_chunk", @@ -261,9 +284,18 @@ func init() { Help: "Impersonate this user when using a service account.", Advanced: true, }, { - Name: "alternate_export", - Default: false, - Help: "Use alternate export URLs for google documents export.", + Name: "alternate_export", + Default: false, + Help: `Use alternate export URLs for google documents export., + +If this option is set this instructs rclone to use an alternate set of +export URLs for drive documents. Users have reported that the +official export URLs can't export large documents, whereas these +unofficial ones can. 
+ +See rclone issue [#2243](https://github.com/ncw/rclone/issues/2243) for background, +[this google drive issue](https://issuetracker.google.com/issues/36761333) and +[this helpful post](https://www.labnol.org/internet/direct-links-for-google-drive/28356/).`, Advanced: true, }, { Name: "upload_cutoff", @@ -271,19 +303,30 @@ func init() { Help: "Cutoff for switching to chunked upload", Advanced: true, }, { - Name: "chunk_size", - Default: defaultChunkSize, - Help: "Upload chunk size. Must a power of 2 >= 256k.", + Name: "chunk_size", + Default: defaultChunkSize, + Help: `Upload chunk size. Must a power of 2 >= 256k. + +Making this larger will improve performance, but note that each chunk +is buffered in memory one per transfer. + +Reducing this will reduce memory usage but decrease performance.`, Advanced: true, }, { - Name: "acknowledge_abuse", - Default: false, - Help: "Set to allow files which return cannotDownloadAbusiveFile to be downloaded.", + Name: "acknowledge_abuse", + Default: false, + Help: `Set to allow files which return cannotDownloadAbusiveFile to be downloaded. + +If downloading a file returns the error "This file has been identified +as malware or spam and cannot be downloaded" with the error code +"cannotDownloadAbusiveFile" then supply this flag to rclone to +indicate you acknowledge the risks of downloading the file and rclone +will download it anyway.`, Advanced: true, }, { Name: "keep_revision_forever", Default: false, - Help: "Keep new head revision forever.", + Help: "Keep new head revision of each file forever.", Advanced: true, }, { Name: "v2_download_min_size", diff --git a/backend/dropbox/dropbox.go b/backend/dropbox/dropbox.go index 87686bb3b..a9684e088 100644 --- a/backend/dropbox/dropbox.go +++ b/backend/dropbox/dropbox.go @@ -120,8 +120,15 @@ func init() { Name: config.ConfigClientSecret, Help: "Dropbox App Client Secret\nLeave blank normally.", }, { - Name: "chunk_size", - Help: fmt.Sprintf("Upload chunk size. 
Max %v.", fs.SizeSuffix(maxChunkSize)), + Name: "chunk_size", + Help: fmt.Sprintf(`Upload chunk size. (< %v). + +Any files larger than this will be uploaded in chunks of this size. + +Note that chunks are buffered in memory (one at a time) so rclone can +deal with retries. Setting this larger will increase the speed +slightly (at most 10%% for 128MB in tests) at the cost of using more +memory. It can be set smaller if you are tight on memory.`, fs.SizeSuffix(maxChunkSize)), Default: fs.SizeSuffix(defaultChunkSize), Advanced: true, }}, diff --git a/backend/jottacloud/jottacloud.go b/backend/jottacloud/jottacloud.go index 1a478cbe5..ee58148f9 100644 --- a/backend/jottacloud/jottacloud.go +++ b/backend/jottacloud/jottacloud.go @@ -80,7 +80,7 @@ func init() { Advanced: true, }, { Name: "unlink", - Help: "Remove existing public link to file/folder with link command rather than creating.", + Help: "Remove existing public link to file/folder with link command rather than creating.\nDefault is false, meaning link command will create or retrieve public link.", Default: false, Advanced: true, }}, diff --git a/backend/local/local.go b/backend/local/local.go index 8c6f30d6a..5ffee86ad 100644 --- a/backend/local/local.go +++ b/backend/local/local.go @@ -49,19 +49,33 @@ func init() { ShortOpt: "L", Advanced: true, }, { - Name: "skip_links", - Help: "Don't warn about skipped symlinks.", + Name: "skip_links", + Help: `Don't warn about skipped symlinks. +This flag disables warning messages on skipped symlinks or junction +points, as you explicitly acknowledge that they should be skipped.`, Default: false, NoPrefix: true, Advanced: true, }, { - Name: "no_unicode_normalization", - Help: "Don't apply unicode normalization to paths and filenames", + Name: "no_unicode_normalization", + Help: `Don't apply unicode normalization to paths and filenames (Deprecated) + +This flag is deprecated now. 
Rclone no longer normalizes unicode file +names, but it compares them with unicode normalization in the sync +routine instead.`, Default: false, Advanced: true, }, { - Name: "no_check_updated", - Help: "Don't check to see if the files change during upload", + Name: "no_check_updated", + Help: `Don't check to see if the files change during upload + +Normally rclone checks the size and modification time of files as they +are being uploaded and aborts with a message which starts "can't copy +- source file is being updated" if the file changes during upload. + +However on some file systems this modification time check may fail (eg +[Glusterfs #2206](https://github.com/ncw/rclone/issues/2206)) so this +check can be disabled with this flag.`, Default: false, Advanced: true, }, { diff --git a/backend/mega/mega.go b/backend/mega/mega.go index a75444149..8d31f9682 100644 --- a/backend/mega/mega.go +++ b/backend/mega/mega.go @@ -63,13 +63,20 @@ func init() { Required: true, IsPassword: true, }, { - Name: "debug", - Help: "Output more debug from Mega.", + Name: "debug", + Help: `Output more debug from Mega. + +If this flag is set (along with -vv) it will print further debugging +information from the mega backend.`, Default: false, Advanced: true, }, { - Name: "hard_delete", - Help: "Delete files permanently rather than putting them into the trash.", + Name: "hard_delete", + Help: `Delete files permanently rather than putting them into the trash. + +Normally the mega backend will put all deletions into the trash rather +than permanently deleting them. 
If you specify this then rclone will +permanently delete objects instead.`, Default: false, Advanced: true, }}, diff --git a/backend/onedrive/onedrive.go b/backend/onedrive/onedrive.go index 3ff37d559..66f58c3cb 100644 --- a/backend/onedrive/onedrive.go +++ b/backend/onedrive/onedrive.go @@ -212,8 +212,11 @@ func init() { Name: config.ConfigClientSecret, Help: "Microsoft App Client Secret\nLeave blank normally.", }, { - Name: "chunk_size", - Help: "Chunk size to upload files with - must be multiple of 320k.", + Name: "chunk_size", + Help: `Chunk size to upload files with - must be multiple of 320k. + +Above this size files will be chunked - must be multiple of 320k. Note +that the chunks will be buffered into memory.`, Default: fs.SizeSuffix(10 * 1024 * 1024), Advanced: true, }, { @@ -227,8 +230,14 @@ func init() { Default: "", Advanced: true, }, { - Name: "expose_onenote_files", - Help: "If true, OneNote files will show up in directory listing (see docs)", + Name: "expose_onenote_files", + Help: `Set to make OneNote files show up in directory listings. + +By default rclone will hide OneNote files in directory listings because +operations like "Open" and "Update" won't work on them. But this +behaviour may also prevent you from deleting them. 
If you want to +delete OneNote files or otherwise want them to show up in directory +listing, set this option.`, Default: false, Advanced: true, }}, diff --git a/backend/s3/s3.go b/backend/s3/s3.go index b7af0255f..bb51d46d8 100644 --- a/backend/s3/s3.go +++ b/backend/s3/s3.go @@ -517,7 +517,7 @@ func init() { }}, }, { Name: "storage_class", - Help: "The storage class to use when storing objects in S3.", + Help: "The storage class to use when storing new objects in S3.", Provider: "AWS", Examples: []fs.OptionExample{{ Value: "", @@ -536,8 +536,17 @@ func init() { Help: "One Zone Infrequent Access storage class", }}, }, { - Name: "chunk_size", - Help: "Chunk size to use for uploading", + Name: "chunk_size", + Help: `Chunk size to use for uploading. + +Any files larger than this will be uploaded in chunks of this +size. The default is 5MB. The minimum is 5MB. + +Note that "--s3-upload-concurrency" chunks of this size are buffered +in memory per transfer. + +If you are transferring large files over high speed links and you have +enough memory, then increasing this will speed up the transfers.`, Default: fs.SizeSuffix(s3manager.MinUploadPartSize), Advanced: true, }, { @@ -551,13 +560,27 @@ func init() { Hide: fs.OptionHideBoth, Advanced: true, }, { - Name: "upload_concurrency", - Help: "Concurrency for multipart uploads.", + Name: "upload_concurrency", + Help: `Concurrency for multipart uploads. + +This is the number of chunks of the same file that are uploaded +concurrently. + +If you are uploading small numbers of large file over high speed link +and these uploads do not fully utilize your bandwidth, then increasing +this may help to speed up the transfers.`, Default: 2, Advanced: true, }, { - Name: "force_path_style", - Help: "If true use path style access if false use virtual hosted style.\nSome providers (eg Aliyun OSS or Netease COS) require this.", + Name: "force_path_style", + Help: `If true use path style access if false use virtual hosted style. 
+ +If this is true (the default) then rclone will use path style access, +if false then rclone will use virtual path style. See [the AWS S3 +docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) +for more info. + +Some providers (eg Aliyun OSS or Netease COS) require this set to false.`, Default: true, Advanced: true, }}, diff --git a/backend/sftp/sftp.go b/backend/sftp/sftp.go index d27a43c93..f2e5f868e 100644 --- a/backend/sftp/sftp.go +++ b/backend/sftp/sftp.go @@ -90,9 +90,20 @@ func init() { Help: "Allow asking for SFTP password when needed.", Advanced: true, }, { - Name: "path_override", - Default: "", - Help: "Override path used by SSH connection.", + Name: "path_override", + Default: "", + Help: `Override path used by SSH connection. + +This allows checksum calculation when SFTP and SSH paths are +different. This issue affects among others Synology NAS boxes. + +Shared folders can be found in directories representing volumes + + rclone sync /home/local/directory remote:/directory --ssh-path-override /volume2/directory + +Home directory can be found in a shared folder called "home" + + rclone sync /home/local/directory remote:/home/directory --ssh-path-override /volume1/homes/USER/directory`, Advanced: true, }, { Name: "set_modtime", diff --git a/backend/swift/swift.go b/backend/swift/swift.go index a490b2077..f98a08001 100644 --- a/backend/swift/swift.go +++ b/backend/swift/swift.go @@ -33,8 +33,11 @@ const ( // SharedOptions are shared between swift and hubic var SharedOptions = []fs.Option{{ - Name: "chunk_size", - Help: "Above this size files will be chunked into a _segments container.", + Name: "chunk_size", + Help: `Above this size files will be chunked into a _segments container. + +Above this size files will be chunked into a _segments container. 
The +default for this is 5GB which is its maximum value.`, Default: fs.SizeSuffix(5 * 1024 * 1024 * 1024), Advanced: true, }} @@ -129,8 +132,13 @@ func init() { Value: "admin", }}, }, { - Name: "storage_policy", - Help: "The storage policy to use when creating a new container", + Name: "storage_policy", + Help: `The storage policy to use when creating a new container + +This applies the specified storage policy when creating a new +container. The policy cannot be changed afterwards. The allowed +configuration values and their meaning depend on your Swift storage +provider.`, Default: "", Examples: []fs.OptionExample{{ Help: "Default", diff --git a/backend/union/union.go b/backend/union/union.go index b71331f71..5d614ce45 100644 --- a/backend/union/union.go +++ b/backend/union/union.go @@ -19,7 +19,7 @@ import ( func init() { fsi := &fs.RegInfo{ Name: "union", - Description: "Builds a stackable unification remote, which can appear to merge the contents of several remotes", + Description: "A stackable unification remote, which can appear to merge the contents of several remotes", NewFs: NewFs, Options: []fs.Option{{ Name: "remotes", diff --git a/docs/content/alias.md b/docs/content/alias.md index 77f54e099..8a0d1b744 100644 --- a/docs/content/alias.md +++ b/docs/content/alias.md @@ -128,3 +128,5 @@ Copy another local directory to the alias directory called source rclone copy /home/source remote:source + + diff --git a/docs/content/amazonclouddrive.md b/docs/content/amazonclouddrive.md index 08a77667c..52f115745 100644 --- a/docs/content/amazonclouddrive.md +++ b/docs/content/amazonclouddrive.md @@ -173,43 +173,8 @@ Let's say you usually use `amazon.co.uk`. When you authenticate with rclone it will take you to an `amazon.com` page to log in. Your `amazon.co.uk` email and password should work here just fine. -### Specific options ### - -Here are the command line options specific to this cloud storage -system. 
- -#### --acd-templink-threshold=SIZE #### - -Files this size or more will be downloaded via their `tempLink`. This -is to work around a problem with Amazon Drive which blocks downloads -of files bigger than about 10GB. The default for this is 9GB which -shouldn't need to be changed. - -To download files above this threshold, rclone requests a `tempLink` -which downloads the file through a temporary URL directly from the -underlying S3 storage. - -#### --acd-upload-wait-per-gb=TIME #### - -Sometimes Amazon Drive gives an error when a file has been fully -uploaded but the file appears anyway after a little while. This -happens sometimes for files over 1GB in size and nearly every time for -files bigger than 10GB. This parameter controls the time rclone waits -for the file to appear. - -The default value for this parameter is 3 minutes per GB, so by -default it will wait 3 minutes for every GB uploaded to see if the -file appears. - -You can disable this feature by setting it to 0. This may cause -conflict errors as rclone retries the failed upload but the file will -most likely appear correctly eventually. - -These values were determined empirically by observing lots of uploads -of big files for a range of file sizes. - -Upload with the `-v` flag to see more info about what rclone is doing -in this situation. + + ### Limitations ### diff --git a/docs/content/azureblob.md b/docs/content/azureblob.md index 837e25c5e..e0a93af65 100644 --- a/docs/content/azureblob.md +++ b/docs/content/azureblob.md @@ -168,39 +168,8 @@ upload which means that there is a limit of 9.5TB of multipart uploads in progress as Azure won't allow more than that amount of uncommitted blocks. -### Specific options ### - -Here are the command line options specific to this cloud storage -system. - -#### --azureblob-upload-cutoff=SIZE #### - -Cutoff for switching to chunked upload - must be <= 256MB. The default -is 256MB. - -#### --azureblob-chunk-size=SIZE #### - -Upload chunk size. Default 4MB. 
Note that this is stored in memory -and there may be up to `--transfers` chunks stored at once in memory. -This can be at most 100MB. - -#### --azureblob-list-chunk=SIZE #### - -List blob limit. Default is the maximum, 5000. `List blobs` requests -are permitted 2 minutes per megabyte to complete. If an operation is -taking longer than 2 minutes per megabyte on average, it will time out ( [source](https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-blob-service-operations#exceptions-to-default-timeout-interval) ). This limit the number of blobs items to return, to avoid the time out. - - -#### --azureblob-access-tier=Hot/Cool/Archive #### - -Azure storage supports blob tiering, you can configure tier in advanced -settings or supply flag while performing data transfer operations. -If there is no `access tier` specified, rclone doesn't apply any tier. -rclone performs `Set Tier` operation on blobs while uploading, if objects -are not modified, specifying `access tier` to new one will have no effect. -If blobs are in `archive tier` at remote, trying to perform data transfer -operations from remote will not be allowed. User should first restore by -tiering blob to `Hot` or `Cool`. + + ### Limitations ### diff --git a/docs/content/b2.md b/docs/content/b2.md index 2455f1314..c7b4ac87a 100644 --- a/docs/content/b2.md +++ b/docs/content/b2.md @@ -265,46 +265,10 @@ start and finish the upload) and another 2 requests for each chunk: /b2api/v1/b2_finish_large_file ``` -### Specific options ### +#### Versions #### -Here are the command line options specific to this cloud storage -system. - -#### --b2-chunk-size valuee=SIZE #### - -When uploading large files chunk the file into this size. Note that -these chunks are buffered in memory and there might a maximum of -`--transfers` chunks in progress at once. 5,000,000 Bytes is the -minimim size (default 96M). 
- -#### --b2-upload-cutoff=SIZE #### - -Cutoff for switching to chunked upload (default 190.735 MiB == 200 -MB). Files above this size will be uploaded in chunks of -`--b2-chunk-size`. - -This value should be set no larger than 4.657GiB (== 5GB) as this is -the largest file size that can be uploaded. - - -#### --b2-test-mode=FLAG #### - -This is for debugging purposes only. - -Setting FLAG to one of the strings below will cause b2 to return -specific errors for debugging purposes. - - * `fail_some_uploads` - * `expire_some_account_authorization_tokens` - * `force_cap_exceeded` - -These will be set in the `X-Bz-Test-Mode` header which is documented -in the [b2 integrations -checklist](https://www.backblaze.com/b2/docs/integration_checklist.html). - -#### --b2-versions #### - -When set rclone will show and act on older versions of files. For example +Versions can be viewed with the `--b2-versions` flag. When it is set +rclone will show and act on older versions of files. For example Listing without `--b2-versions` @@ -329,3 +293,7 @@ server to the nearest millisecond appended to them. Note that when using `--b2-versions` no file write operations are permitted, so you can't upload files or delete them. + + + + diff --git a/docs/content/box.md b/docs/content/box.md index 719158009..eaa688c85 100644 --- a/docs/content/box.md +++ b/docs/content/box.md @@ -217,19 +217,8 @@ normally 8MB so increasing `--transfers` will increase memory use. Depending on the enterprise settings for your user, the item will either be actually deleted from Box or moved to the trash. -### Specific options ### - -Here are the command line options specific to this cloud storage -system. - -#### --box-upload-cutoff=SIZE #### - -Cutoff for switching to chunked upload - must be >= 50MB. The default -is 50MB. - -#### --box-commit-retries int #### - -Max number of times to try committing a multipart file. 
(default 100) + + ### Limitations ### diff --git a/docs/content/cache.md b/docs/content/cache.md index 250fd2b8d..347db9e27 100644 --- a/docs/content/cache.md +++ b/docs/content/cache.md @@ -290,168 +290,5 @@ Params: - **remote** = path to remote **(required)** - **withData** = true/false to delete cached data (chunks) as well _(optional, false by default)_ -### Specific options ### - -Here are the command line options specific to this cloud storage -system. - -#### --cache-db-path=PATH #### - -Path to where the file structure metadata (DB) is stored locally. The remote -name is used as the DB file name. - -**Default**: /cache-backend/ -**Example**: /.cache/cache-backend/test-cache - -#### --cache-chunk-path=PATH #### - -Path to where partial file data (chunks) is stored locally. The remote -name is appended to the final path. - -This config follows the `--cache-db-path`. If you specify a custom -location for `--cache-db-path` and don't specify one for `--cache-chunk-path` -then `--cache-chunk-path` will use the same path as `--cache-db-path`. - -**Default**: /cache-backend/ -**Example**: /.cache/cache-backend/test-cache - -#### --cache-db-purge #### - -Flag to clear all the cached data for this remote on start. - -**Default**: not set - -#### --cache-chunk-size=SIZE #### - -The size of a chunk (partial file data). Use lower numbers for slower -connections. If the chunk size is changed, any downloaded chunks will be invalid and cache-chunk-path will need to be cleared or unexpected EOF errors will occur. - -**Default**: 5M - -#### --cache-chunk-total-size=SIZE #### - -The total size that the chunks can take up on the local disk. If `cache` -exceeds this value then it will start to the delete the oldest chunks until -it goes under this value. - -**Default**: 10G - -#### --cache-chunk-clean-interval=DURATION #### - -How often should `cache` perform cleanups of the chunk storage. The default value -should be ok for most people. 
If you find that `cache` goes over `cache-chunk-total-size` -too often then try to lower this value to force it to perform cleanups more often. - -**Default**: 1m - -#### --cache-info-age=DURATION #### - -How long to keep file structure information (directory listings, file size, -mod times etc) locally. - -If all write operations are done through `cache` then you can safely make -this value very large as the cache store will also be updated in real time. - -**Default**: 6h - -#### --cache-read-retries=RETRIES #### - -How many times to retry a read from a cache storage. - -Since reading from a `cache` stream is independent from downloading file data, -readers can get to a point where there's no more data in the cache. -Most of the times this can indicate a connectivity issue if `cache` isn't -able to provide file data anymore. - -For really slow connections, increase this to a point where the stream is -able to provide data but your experience will be very stuttering. - -**Default**: 10 - -#### --cache-workers=WORKERS #### - -How many workers should run in parallel to download chunks. - -Higher values will mean more parallel processing (better CPU needed) and -more concurrent requests on the cloud provider. -This impacts several aspects like the cloud provider API limits, more stress -on the hardware that rclone runs on but it also means that streams will -be more fluid and data will be available much more faster to readers. - -**Note**: If the optional Plex integration is enabled then this setting -will adapt to the type of reading performed and the value specified here will be used -as a maximum number of workers to use. -**Default**: 4 - -#### --cache-chunk-no-memory #### - -By default, `cache` will keep file data during streaming in RAM as well -to provide it to readers as fast as possible. - -This transient data is evicted as soon as it is read and the number of -chunks stored doesn't exceed the number of workers. 
However, depending -on other settings like `cache-chunk-size` and `cache-workers` this footprint -can increase if there are parallel streams too (multiple files being read -at the same time). - -If the hardware permits it, use this feature to provide an overall better -performance during streaming but it can also be disabled if RAM is not -available on the local machine. - -**Default**: not set - -#### --cache-rps=NUMBER #### - -This setting places a hard limit on the number of requests per second that `cache` -will be doing to the cloud provider remote and try to respect that value -by setting waits between reads. - -If you find that you're getting banned or limited on the cloud provider -through cache and know that a smaller number of requests per second will -allow you to work with it then you can use this setting for that. - -A good balance of all the other settings should make this -setting useless but it is available to set for more special cases. - -**NOTE**: This will limit the number of requests during streams but other -API calls to the cloud provider like directory listings will still pass. - -**Default**: disabled - -#### --cache-writes #### - -If you need to read files immediately after you upload them through `cache` -you can enable this flag to have their data stored in the cache store at the -same time during upload. - -**Default**: not set - -#### --cache-tmp-upload-path=PATH #### - -This is the path where `cache` will use as a temporary storage for new files -that need to be uploaded to the cloud provider. - -Specifying a value will enable this feature. Without it, it is completely disabled -and files will be uploaded directly to the cloud provider - -**Default**: empty - -#### --cache-tmp-wait-time=DURATION #### - -This is the duration that a file must wait in the temporary location -_cache-tmp-upload-path_ before it is selected for upload. 
- -Note that only one file is uploaded at a time and it can take longer to -start the upload if a queue formed for this purpose. - -**Default**: 15m - -#### --cache-db-wait-time=DURATION #### - -Only one process can have the DB open at any one time, so rclone waits -for this duration for the DB to become available before it gives an -error. - -If you set it to 0 then it will wait forever. - -**Default**: 1s + + diff --git a/docs/content/crypt.md b/docs/content/crypt.md index b2c42a74d..5960c0615 100644 --- a/docs/content/crypt.md +++ b/docs/content/crypt.md @@ -294,20 +294,8 @@ Note that you should use the `rclone cryptcheck` command to check the integrity of a crypted remote instead of `rclone check` which can't check the checksums properly. -### Specific options ### - -Here are the command line options specific to this cloud storage -system. - -#### --crypt-show-mapping #### - -If this flag is set then for each file that the remote is asked to -list, it will log (at level INFO) a line stating the decrypted file -name and the encrypted file name. - -This is so you can work out which encrypted names are which decrypted -names just in case you need to do something with the encrypted file -names, or for debugging purposes. + + ## Backing up a crypted remote ## diff --git a/docs/content/drive.md b/docs/content/drive.md index e707bf876..42b6a5e75 100644 --- a/docs/content/drive.md +++ b/docs/content/drive.md @@ -388,33 +388,7 @@ Drive, the size of all files in the Trash and the space used by other Google services such as Gmail. This command does not take any path arguments. -### Specific options ### - -Here are the command line options specific to this cloud storage -system. 
- -#### --drive-acknowledge-abuse #### - -If downloading a file returns the error `This file has been identified -as malware or spam and cannot be downloaded` with the error code -`cannotDownloadAbusiveFile` then supply this flag to rclone to -indicate you acknowledge the risks of downloading the file and rclone -will download it anyway. - -#### --drive-auth-owner-only #### - -Only consider files owned by the authenticated user. - -#### --drive-chunk-size=SIZE #### - -Upload chunk size. Must a power of 2 >= 256k. Default value is 8 MB. - -Making this larger will improve performance, but note that each chunk -is buffered in memory one per transfer. - -Reducing this will reduce memory usage but decrease performance. - -#### --drive-export-formats / --drive-import-formats #### +#### Import/Export of google documents #### Google documents can be exported from and uploaded to Google Drive. @@ -509,77 +483,8 @@ Google Documents. | url | INI style link file | macOS, Windows | | webloc | macOS specific XML format | macOS | -#### --drive-alternate-export #### - -If this option is set this instructs rclone to use an alternate set of -export URLs for drive documents. Users have reported that the -official export URLs can't export large documents, whereas these -unofficial ones can. - -See rclone issue [#2243](https://github.com/ncw/rclone/issues/2243) for background, -[this google drive issue](https://issuetracker.google.com/issues/36761333) and -[this helpful post](https://www.labnol.org/internet/direct-links-for-google-drive/28356/). - -#### --drive-impersonate user #### - -When using a service account, this instructs rclone to impersonate the user passed in. - -#### --drive-keep-revision-forever #### - -Keeps new head revision of the file forever. - -#### --drive-list-chunk int #### - -Size of listing chunk 100-1000. 0 to disable. 
(default 1000) - -#### --drive-shared-with-me #### - -Instructs rclone to operate on your "Shared with me" folder (where -Google Drive lets you access the files and folders others have shared -with you). - -This works both with the "list" (lsd, lsl, etc) and the "copy" -commands (copy, sync, etc), and with all other commands too. - -#### --drive-skip-gdocs #### - -Skip google documents in all listings. If given, gdocs practically become invisible to rclone. - -#### --drive-trashed-only #### - -Only show files that are in the trash. This will show trashed files -in their original directory structure. - -#### --drive-upload-cutoff=SIZE #### - -File size cutoff for switching to chunked upload. Default is 8 MB. - -#### --drive-use-trash #### - -Controls whether files are sent to the trash or deleted -permanently. Defaults to true, namely sending files to the trash. Use -`--drive-use-trash=false` to delete files permanently instead. - -#### --drive-use-created-date #### - -Use the file creation date in place of the modification date. Defaults -to false. - -Useful when downloading data and you want the creation date used in -place of the last modified date. - -**WARNING**: This flag may have some unexpected consequences. - -When uploading to your drive all files will be overwritten unless they -haven't been modified since their creation. And the inverse will occur -while downloading. This side effect can be avoided by using the -`--checksum` flag. - -This feature was implemented to retain photos capture date as recorded -by google photos. You will first need to check the "Create a Google -Photos folder" option in your google drive settings. You can then copy -or move the photos locally and use the date the image was taken -(created) set as the modification date. 
+ + ### Limitations ### diff --git a/docs/content/dropbox.md b/docs/content/dropbox.md index 1b391fce7..f9bdb3d4b 100644 --- a/docs/content/dropbox.md +++ b/docs/content/dropbox.md @@ -123,20 +123,8 @@ Dropbox supports [its own hash type](https://www.dropbox.com/developers/reference/content-hash) which is checked for all transfers. -### Specific options ### - -Here are the command line options specific to this cloud storage -system. - -#### --dropbox-chunk-size=SIZE #### - -Any files larger than this will be uploaded in chunks of this -size. The default is 48MB. The maximum is 150MB. - -Note that chunks are buffered in memory (one at a time) so rclone can -deal with retries. Setting this larger will increase the speed -slightly (at most 10% for 128MB in tests) at the cost of using more -memory. It can be set smaller if you are tight on memory. + + ### Limitations ### diff --git a/docs/content/ftp.md b/docs/content/ftp.md index 4d69c5210..e7fda1014 100644 --- a/docs/content/ftp.md +++ b/docs/content/ftp.md @@ -119,6 +119,9 @@ will be time of upload. FTP does not support any checksums. + + + ### Limitations ### Note that since FTP isn't HTTP based the following flags don't work diff --git a/docs/content/googlecloudstorage.md b/docs/content/googlecloudstorage.md index 920e997fd..86af18cb7 100644 --- a/docs/content/googlecloudstorage.md +++ b/docs/content/googlecloudstorage.md @@ -228,3 +228,6 @@ docs](/docs/#fast-list) for more details. Google google cloud storage stores md5sums natively and rclone stores modification times as metadata on the object, under the "mtime" key in RFC3339 format accurate to 1ns. 
+ + + diff --git a/docs/content/http.md b/docs/content/http.md index 83bff5871..4c0764453 100644 --- a/docs/content/http.md +++ b/docs/content/http.md @@ -125,3 +125,6 @@ Since the http remote only has one config parameter it is easy to use without a config file: rclone lsd --http-url https://beta.rclone.org :http: + + + diff --git a/docs/content/hubic.md b/docs/content/hubic.md index 1156b7c49..da86c6480 100644 --- a/docs/content/hubic.md +++ b/docs/content/hubic.md @@ -128,6 +128,9 @@ amongst others) for storing the modification time for an object. Note that Hubic wraps the Swift backend, so most of the properties of are the same. + + + ### Limitations ### This uses the normal OpenStack Swift mechanism to refresh the Swift diff --git a/docs/content/jottacloud.md b/docs/content/jottacloud.md index c6ee157f3..258f6f7cc 100644 --- a/docs/content/jottacloud.md +++ b/docs/content/jottacloud.md @@ -124,6 +124,9 @@ To view your current quota you can use the `rclone about remote:` command which will display your usage limit (unless it is unlimited) and the current usage. + + + ### Limitations ### Note that Jottacloud is case insensitive so you can't have a file called @@ -133,27 +136,6 @@ There are quite a few characters that can't be in Jottacloud file names. Rclone Jottacloud only supports filenames up to 255 characters in length. -### Specific options ### - -Here are the command line options specific to this cloud storage -system. - -#### --jottacloud-md5-memory-limit SizeSuffix - -Files bigger than this will be cached on disk to calculate the MD5 if -required. (default 10M) - -#### --jottacloud-hard-delete #### - -Controls whether files are sent to the trash or deleted -permanently. Defaults to false, namely sending files to the trash. -Use `--jottacloud-hard-delete=true` to delete files permanently instead. - -#### --jottacloud-unlink #### - -Set to true to make the link command remove existing public link to file/folder. 
-Default is false, meaning link command will create or retrieve public link. - ### Troubleshooting ### Jottacloud exhibits some inconsistent behaviours regarding deleted files and folders which may cause Copy, Move and DirMove operations to previously deleted paths to fail. Emptying the trash should help in such cases. \ No newline at end of file diff --git a/docs/content/local.md b/docs/content/local.md index a77abf94b..88936cee4 100644 --- a/docs/content/local.md +++ b/docs/content/local.md @@ -75,17 +75,13 @@ This will use UNC paths on `c:\src` but not on `z:\dst`. Of course this will cause problems if the absolute path length of a file exceeds 258 characters on z, so only use this option if you have to. -### Specific options ### - -Here are the command line options specific to local storage - -#### --copy-links, -L #### +### Symlinks / Junction points Normally rclone will ignore symlinks or junction points (which behave like symlinks under Windows). -If you supply this flag then rclone will follow the symlink and copy -the pointed to file or directory. +If you supply `--copy-links` or `-L` then rclone will follow the +symlink and copy the pointed to file or directory. This flag applies to all commands. @@ -120,28 +116,13 @@ $ rclone -L ls /tmp/a 6 b/one ``` -#### --local-no-check-updated #### +### Restricting filesystems with --one-file-system -Don't check to see if the files change during upload. +Normally rclone will recurse through filesystems as mounted. -Normally rclone checks the size and modification time of files as they -are being uploaded and aborts with a message which starts `can't copy -- source file is being updated` if the file changes during upload. - -However on some file systems this modification time check may fail (eg -[Glusterfs #2206](https://github.com/ncw/rclone/issues/2206)) so this -check can be disabled with this flag. - -#### --local-no-unicode-normalization #### - -This flag is deprecated now. 
Rclone no longer normalizes unicode file -names, but it compares them with unicode normalization in the sync -routine instead. - -#### --one-file-system, -x #### - -This tells rclone to stay in the filesystem specified by the root and -not to recurse into different file systems. +However if you set `--one-file-system` or `-x` this tells rclone to +stay in the filesystem specified by the root and not to recurse into +different file systems. For example if you have a directory hierarchy like this @@ -176,10 +157,7 @@ treats a bind mount to the same device as being on the same filesystem. **NB** This flag is only available on Unix based systems. On systems -where it isn't supported (eg Windows) it will not appear as an valid -flag. +where it isn't supported (eg Windows) it will be ignored. -#### --skip-links #### - -This flag disables warning messages on skipped symlinks or junction -points, as you explicitly acknowledge that they should be skipped. + + diff --git a/docs/content/mega.md b/docs/content/mega.md index a17db13bd..782213911 100644 --- a/docs/content/mega.md +++ b/docs/content/mega.md @@ -96,22 +96,8 @@ messages in the log about duplicates. Use `rclone dedupe` to fix duplicated files. -### Specific options ### - -Here are the command line options specific to this cloud storage -system. - -#### --mega-debug #### - -If this flag is set (along with `-vv`) it will print further debugging -information from the mega backend. - -#### --mega-hard-delete #### - -Normally the mega backend will put all deletions into the trash rather -than permanently deleting them. If you specify this flag (or set it -in the advanced config) then rclone will permanently delete objects -instead. 
+ + ### Limitations ### diff --git a/docs/content/onedrive.md b/docs/content/onedrive.md index 3ea94d90f..add6b9d68 100644 --- a/docs/content/onedrive.md +++ b/docs/content/onedrive.md @@ -155,22 +155,8 @@ doesn't provide an API to permanently delete files, nor to empty the trash, so you will have to do that with one of Microsoft's apps or via the OneDrive website. -### Specific options ### - -Here are the command line options specific to this cloud storage -system. - -#### --onedrive-chunk-size=SIZE #### - -Above this size files will be chunked - must be multiple of 320k. The -default is 10MB. Note that the chunks will be buffered into memory. - -#### --onedrive-expose-onenote-files #### - -By default rclone will hide OneNote files in directory listing because operations like `Open` -and `Update` won't work on them. But this behaviour may also prevent you from deleting them. -If you want to delete OneNote files or otherwise want them to show up in directory listing, -set this flag. + + ### Limitations ### diff --git a/docs/content/opendrive.md b/docs/content/opendrive.md index e40d0184a..229774963 100644 --- a/docs/content/opendrive.md +++ b/docs/content/opendrive.md @@ -93,13 +93,8 @@ OpenDrive allows modification times to be set on objects accurate to 1 second. These will be used to detect whether objects need syncing or not. -### Deleting files ### - -Any files you delete with rclone will end up in the trash. Amazon -don't provide an API to permanently delete files, nor to empty the -trash, so you will have to do that with one of Amazon's apps or via -the OpenDrive website. As of November 17, 2016, files are -automatically deleted by Amazon from the trash after 30 days. 
+ + ### Limitations ### diff --git a/docs/content/pcloud.md b/docs/content/pcloud.md index 0194b37db..34c75c56e 100644 --- a/docs/content/pcloud.md +++ b/docs/content/pcloud.md @@ -133,3 +133,6 @@ pCloud supports MD5 and SHA1 type hashes, so you can use the Deleted files will be moved to the trash. Your subscription level will determine how long items stay in the trash. `rclone cleanup` can be used to empty the trash. + + + diff --git a/docs/content/qingstor.md b/docs/content/qingstor.md index 7fd66a5a9..476251976 100644 --- a/docs/content/qingstor.md +++ b/docs/content/qingstor.md @@ -152,3 +152,5 @@ credentials. In order of precedence: - Access Key ID: `QS_ACCESS_KEY_ID` or `QS_ACCESS_KEY` - Secret Access Key: `QS_SECRET_ACCESS_KEY` or `QS_SECRET_KEY` + + diff --git a/docs/content/s3.md b/docs/content/s3.md index 8732401f4..5000a98c5 100644 --- a/docs/content/s3.md +++ b/docs/content/s3.md @@ -370,56 +370,8 @@ tries to access the data you will see an error like below. In this case you need to [restore](http://docs.aws.amazon.com/AmazonS3/latest/user-guide/restore-archived-objects.html) the object(s) in question before using rclone. -### Specific options ### - -Here are the command line options specific to this cloud storage -system. - -#### --s3-acl=STRING #### - -Canned ACL used when creating buckets and/or storing objects in S3. - -For more info visit the [canned ACL docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl). - -#### --s3-storage-class=STRING #### - -Storage class to upload new objects with. - -Available options include: - - - STANDARD - default storage class - - STANDARD_IA - for less frequently accessed data (e.g backups) - - ONEZONE_IA - for storing data in only one Availability Zone - - REDUCED_REDUNDANCY (only for noncritical, reproducible data, has lower redundancy) - -#### --s3-chunk-size=SIZE #### - -Any files larger than this will be uploaded in chunks of this -size. The default is 5MB. The minimum is 5MB. 
- -Note that 2 chunks of this size are buffered in memory per transfer. - -If you are transferring large files over high speed links and you have -enough memory, then increasing this will speed up the transfers. - -#### --s3-force-path-style=BOOL #### - -If this is true (the default) then rclone will use path style access, -if false then rclone will use virtual path style. See [the AWS S3 -docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) -for more info. - -Some providers (eg Aliyun OSS or Netease COS) require this set to -`false`. It can also be set in the config in the advanced section. - -#### --s3-upload-concurrency #### - -Number of chunks of the same file that are uploaded concurrently. -Default is 2. - -If you are uploading small amount of large file over high speed link -and these uploads do not fully utilize your bandwidth, then increasing -this may help to speed up the transfers. + + ### Anonymous access to public buckets ### diff --git a/docs/content/sftp.md b/docs/content/sftp.md index 811063121..41cc9f877 100644 --- a/docs/content/sftp.md +++ b/docs/content/sftp.md @@ -147,28 +147,6 @@ And then at the end of the session These commands can be used in scripts of course. -### Specific options ### - -Here are the command line options specific to this remote. - -#### --sftp-ask-password #### - -Ask for the SFTP password if needed when no password has been configured. - -#### --ssh-path-override #### - -Override path used by SSH connection. Allows checksum calculation when -SFTP and SSH paths are different. This issue affects among others Synology -NAS boxes. 
- -Shared folders can be found in directories representing volumes - - rclone sync /home/local/directory remote:/directory --ssh-path-override /volume2/directory - -Home directory can be found in a shared folder called `homes` - - rclone sync /home/local/directory remote:/home/directory --ssh-path-override /volume1/homes/USER/directory - ### Modified time ### Modified times are stored on the server to 1 second precision. @@ -180,6 +158,9 @@ upload (for example, certain configurations of ProFTPd with mod_sftp). If you are using one of these servers, you can set the option `set_modtime = false` in your RClone backend configuration to disable this behaviour. + + + ### Limitations ### SFTP supports checksums if the same login has shell access and `md5sum` diff --git a/docs/content/swift.md b/docs/content/swift.md index 6152245bf..4fc5c7dff 100644 --- a/docs/content/swift.md +++ b/docs/content/swift.md @@ -261,20 +261,8 @@ sufficient to determine if it is "dirty". By using `--update` along with `--use-server-modtime`, you can avoid the extra API call and simply upload files whose local modtime is newer than the time it was last uploaded. -### Specific options ### - -Here are the command line options specific to this cloud storage -system. - -#### --swift-storage-policy=STRING #### -Apply the specified storage policy when creating a new container. The policy -cannot be changed afterwards. The allowed configuration values and their -meaning depend on your Swift storage provider. - -#### --swift-chunk-size=SIZE #### - -Above this size files will be chunked into a _segments container. The -default for this is 5GB which is its maximum value. 
+ + ### Modified time ### diff --git a/docs/content/union.md b/docs/content/union.md index bc93478f2..158694112 100644 --- a/docs/content/union.md +++ b/docs/content/union.md @@ -142,3 +142,5 @@ Copy another local directory to the union directory called source, which will be rclone copy C:\source remote:source + + diff --git a/docs/content/webdav.md b/docs/content/webdav.md index c85a99e79..c93f4eb97 100644 --- a/docs/content/webdav.md +++ b/docs/content/webdav.md @@ -101,6 +101,9 @@ Owncloud or Nextcloud rclone will support modified times. Hashes are not supported. + + + ## Provider notes ## See below for notes on specific providers. diff --git a/docs/content/yandex.md b/docs/content/yandex.md index 91bff66fb..4db781025 100644 --- a/docs/content/yandex.md +++ b/docs/content/yandex.md @@ -127,3 +127,6 @@ MD5 checksums are natively supported by Yandex Disk. If you wish to empty your trash you can use the `rclone cleanup remote:` command which will permanently delete all your trashed files. This command does not take any path arguments. + + +