From 2001cc08315f7f488c4a3f9950a57fbb81f26d96 Mon Sep 17 00:00:00 2001 From: Nick Craig-Wood Date: Tue, 20 Dec 2022 17:16:14 +0000 Subject: [PATCH] Version v1.61.0 --- MANUAL.html | 2580 +++++++----- MANUAL.md | 2765 +++++++++---- MANUAL.txt | 2883 +++++++++----- docs/content/changelog.md | 95 + docs/content/commands/rclone_about.md | 1 + docs/content/commands/rclone_authorize.md | 1 + docs/content/commands/rclone_backend.md | 1 + docs/content/commands/rclone_bisync.md | 1 + docs/content/commands/rclone_cat.md | 1 + docs/content/commands/rclone_checksum.md | 1 + docs/content/commands/rclone_cleanup.md | 1 + docs/content/commands/rclone_config.md | 1 + docs/content/commands/rclone_config_create.md | 3 +- docs/content/commands/rclone_config_delete.md | 1 + docs/content/commands/rclone_config_dump.md | 1 + docs/content/commands/rclone_config_file.md | 1 + .../commands/rclone_config_password.md | 1 + docs/content/commands/rclone_config_paths.md | 1 + .../commands/rclone_config_providers.md | 1 + docs/content/commands/rclone_config_show.md | 1 + docs/content/commands/rclone_config_touch.md | 1 + docs/content/commands/rclone_config_update.md | 3 +- docs/content/commands/rclone_copyto.md | 1 + docs/content/commands/rclone_copyurl.md | 1 + docs/content/commands/rclone_cryptcheck.md | 1 + docs/content/commands/rclone_cryptdecode.md | 1 + docs/content/commands/rclone_dedupe.md | 1 + docs/content/commands/rclone_delete.md | 1 + docs/content/commands/rclone_deletefile.md | 1 + .../commands/rclone_genautocomplete.md | 1 + docs/content/commands/rclone_gendocs.md | 1 + docs/content/commands/rclone_hashsum.md | 1 + docs/content/commands/rclone_link.md | 1 + docs/content/commands/rclone_listremotes.md | 1 + docs/content/commands/rclone_lsf.md | 1 + docs/content/commands/rclone_lsjson.md | 2 + docs/content/commands/rclone_lsl.md | 1 + docs/content/commands/rclone_md5sum.md | 1 + docs/content/commands/rclone_mount.md | 21 +- docs/content/commands/rclone_move.md | 1 + 
docs/content/commands/rclone_moveto.md | 1 + docs/content/commands/rclone_ncdu.md | 6 +- docs/content/commands/rclone_obscure.md | 1 + docs/content/commands/rclone_rc.md | 1 + docs/content/commands/rclone_rcat.md | 1 + docs/content/commands/rclone_rcd.md | 96 + docs/content/commands/rclone_rmdirs.md | 1 + docs/content/commands/rclone_selfupdate.md | 1 + docs/content/commands/rclone_serve.md | 1 + docs/content/commands/rclone_serve_dlna.md | 17 +- docs/content/commands/rclone_serve_docker.md | 21 +- docs/content/commands/rclone_serve_ftp.md | 15 +- docs/content/commands/rclone_serve_http.md | 33 +- docs/content/commands/rclone_serve_restic.md | 78 +- docs/content/commands/rclone_serve_sftp.md | 15 +- docs/content/commands/rclone_serve_webdav.md | 74 +- docs/content/commands/rclone_settier.md | 1 + docs/content/commands/rclone_sha1sum.md | 1 + docs/content/commands/rclone_size.md | 1 + docs/content/commands/rclone_test.md | 1 + .../commands/rclone_test_changenotify.md | 3 +- .../content/commands/rclone_test_histogram.md | 1 + docs/content/commands/rclone_test_info.md | 3 +- docs/content/commands/rclone_test_makefile.md | 1 + .../content/commands/rclone_test_makefiles.md | 2 + docs/content/commands/rclone_test_memory.md | 1 + docs/content/commands/rclone_touch.md | 1 + docs/content/commands/rclone_tree.md | 1 + docs/content/commands/rclone_version.md | 1 + docs/content/flags.md | 1117 +++--- docs/content/ftp.md | 4 +- docs/content/local.md | 2 +- docs/content/mailru.md | 5 + docs/content/rc.md | 58 +- docs/content/s3.md | 106 +- docs/content/sftp.md | 61 + go.mod | 4 +- go.sum | 4 - rclone.1 | 3457 ++++++++++++----- 79 files changed, 9362 insertions(+), 4220 deletions(-) diff --git a/MANUAL.html b/MANUAL.html index 4b87ec4b1..f37861a96 100644 --- a/MANUAL.html +++ b/MANUAL.html @@ -19,7 +19,7 @@

rclone(1) User Manual

Nick Craig-Wood

-

Oct 21, 2022

+

Dec 20, 2022

Rclone syncs your files to cloud storage

rclone logo

@@ -105,6 +105,7 @@
  • IDrive e2
  • IONOS Cloud
  • Koofr
  • +
  • Liara Object Storage
  • Mail.ru Cloud
  • Memset Memstore
  • Mega
  • @@ -227,9 +228,9 @@ macOS cannot verify that this app is free from malware.

    Precompiled binary

    Fetch the correct binary for your processor type by clicking on these links. If not sure, use the first link.

    Open this file in the Explorer and extract rclone.exe. Rclone is a portable executable so you can place it wherever is convenient.

    Open a CMD window (or powershell) and run the binary. Note that rclone does not launch a GUI by default, it runs in the CMD Window.

    @@ -1189,7 +1190,7 @@ rclone config create myremote swift env_auth=true "State": "*oauth-islocal,teamdrive,,", "Option": { "Name": "config_is_local", - "Help": "Use auto config?\n * Say Y if not sure\n * Say N if you are working on a remote or headless machine\n", + "Help": "Use web browser to automatically authenticate rclone with remote?\n * Say Y if the machine running rclone has a web browser you can use\n * Say N if running rclone on a (remote) machine without web browser access\nIf not sure try Y. If Y failed, try N.\n", "Default": true, "Examples": [ { @@ -1386,7 +1387,7 @@ rclone config update myremote env_auth=true "State": "*oauth-islocal,teamdrive,,", "Option": { "Name": "config_is_local", - "Help": "Use auto config?\n * Say Y if not sure\n * Say N if you are working on a remote or headless machine\n", + "Help": "Use web browser to automatically authenticate rclone with remote?\n * Say Y if the machine running rclone has a web browser you can use\n * Say N if running rclone on a (remote) machine without web browser access\nIf not sure try Y. 
If Y failed, try N.\n", "Default": true, "Examples": [ { @@ -1858,6 +1859,7 @@ rclone copy --files-from-raw new_files /path/to/local remote:path --hash Include hashes in the output (may take longer) --hash-type stringArray Show only this hash type (may be repeated) -h, --help help for lsjson + -M, --metadata Add metadata to the listing --no-mimetype Don't read the mime type (can speed things up) --no-modtime Don't read the modification time (can speed things up) --original Show the ID of the underlying Object @@ -2106,14 +2108,14 @@ WantedBy=multi-user.target --allow-other Allow access to other users (not supported on Windows) --allow-root Allow access to root user (not supported on Windows) --async-read Use asynchronous reads (not supported on Windows) (default true) - --attr-timeout duration Time for which file/directory attributes are cached (default 1s) + --attr-timeout Duration Time for which file/directory attributes are cached (default 1s) --daemon Run mount in background and exit parent process (as background output is suppressed, use --log-file with --log-format=pid,... 
to monitor) (not supported on Windows) - --daemon-timeout duration Time limit for rclone to respond to kernel (not supported on Windows) - --daemon-wait duration Time to wait for ready mount from daemon (maximum time on Linux, constant sleep time on OSX/BSD) (not supported on Windows) (default 1m0s) + --daemon-timeout Duration Time limit for rclone to respond to kernel (not supported on Windows) (default 0s) + --daemon-wait Duration Time to wait for ready mount from daemon (maximum time on Linux, constant sleep time on OSX/BSD) (not supported on Windows) (default 1m0s) --debug-fuse Debug the FUSE internals - needs -v --default-permissions Makes kernel enforce access control based on the file mode (not supported on Windows) --devname string Set the device name - default is remote:path - --dir-cache-time duration Time to cache directory entries for (default 5m0s) + --dir-cache-time Duration Time to cache directory entries for (default 5m0s) --dir-perms FileMode Directory permissions (default 0777) --file-perms FileMode File permissions (default 0666) --fuse-flag stringArray Flags or arguments to be passed direct to libfuse/WinFsp (repeat if required) @@ -2127,24 +2129,24 @@ WantedBy=multi-user.target --noappledouble Ignore Apple Double (._) and .DS_Store files (supported on OSX only) (default true) --noapplexattr Ignore all "com.apple.*" extended attributes (supported on OSX only) -o, --option stringArray Option for libfuse/WinFsp (repeat if required) - --poll-interval duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) + --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --read-only Only allow read-only access --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) --umask int Override the permission bits set by 
the filesystem (not supported on Windows) (default 2) - --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s) + --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off) - --vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s) + --vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s) --vfs-case-insensitive If a file name not found, find a case insensitive match --vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off) --vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) - --vfs-read-wait duration Time to wait for in-sequence read before seeking (default 20ms) + --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-used-is-size rclone size Use the rclone size algorithm for Used size - --vfs-write-back duration Time to writeback files after last use when using cache (default 5s) - --vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s) + --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s) + --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) --volname string Set the volume name (supported on Windows and OSX only) --write-back-cache Makes kernel buffer writes before sending them to rclone (without this, writethrough 
caching is used) (not supported on Windows)

    See the global flags page for global options not listed here.

    @@ -2186,11 +2188,12 @@ if src is directory
     ↑,↓ or k,j to Move
      →,l to enter
      ←,h to return
    - c toggle counts
      g toggle graph
    + c toggle counts
      a toggle average size in directory
    + m toggle modified time
      u toggle human-readable format
    - n,s,C,A sort by name,size,count,average size
    + n,s,C,A,M sort by name,size,count,asize,mtime
      d delete file/directory
      v select file/directory
      V enter visual select mode
    @@ -2302,6 +2305,109 @@ ffmpeg - | rclone rcat remote:path/to/file

    This is useful if you are controlling rclone via the rc API.

    If you pass in a path to a directory, rclone will serve that directory for GET requests on the URL passed in. It will also open the URL in the browser when rclone is run.

    See the rc documentation for more info on the rc flags.

    +

    Server options

    +

    Use --addr to specify which IP address and port the server should listen on, eg --addr 1.2.3.4:8000 or --addr :8080 to listen to all IPs. By default it only listens on localhost. You can use port :0 to let the OS choose an available port.

    +

    If you set --addr to listen on a public or LAN accessible IP address then using Authentication is advised - see the next section for info.

    +

    You can use a unix socket by setting the url to unix:///path/to/socket or just by using an absolute path name. Note that unix sockets bypass the authentication - this is expected to be done with file system permissions.

    +

    --addr may be repeated to listen on multiple IPs/ports/sockets.

    +

    --server-read-timeout and --server-write-timeout can be used to control the timeouts on the server. Note that this is the total time for a transfer.

    +

    --max-header-bytes controls the maximum number of bytes the server will accept in the HTTP header.

    +

    --baseurl controls the URL prefix that rclone serves from. By default rclone will serve from the root. If you used --baseurl "/rclone" then rclone would serve from a URL starting with "/rclone/". This is useful if you wish to proxy rclone serve. Rclone automatically inserts leading and trailing "/" on --baseurl, so --baseurl "rclone", --baseurl "/rclone" and --baseurl "/rclone/" are all treated identically.

    +

    TLS (SSL)

    +

    By default this will serve over http. If you want you can serve over https. You will need to supply the --cert and --key flags. If you wish to do client side certificate validation then you will need to supply --client-ca also.

    +

    --cert should be a either a PEM encoded certificate or a concatenation of that with the CA certificate. --key should be the PEM encoded private key and --client-ca should be the PEM encoded client certificate authority certificate.

    +

    --min-tls-version is minimum TLS version that is acceptable. Valid values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default "tls1.0").

    +

    Template

    +

    --template allows a user to specify a custom markup template for HTTP and WebDAV serve functions. The server exports the following markup to be used within the template to server pages:

    + ++++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    ParameterDescription
    .NameThe full path of a file/directory.
    .TitleDirectory listing of .Name
    .SortThe current sort used. This is changeable via ?sort= parameter
    Sort Options: namedirfirst,name,size,time (default namedirfirst)
    .OrderThe current ordering used. This is changeable via ?order= parameter
    Order Options: asc,desc (default asc)
    .QueryCurrently unused.
    .BreadcrumbAllows for creating a relative navigation
    -- .LinkThe relative to the root link of the Text.
    -- .TextThe Name of the directory.
    .EntriesInformation about a specific file/directory.
    -- .URLThe 'url' of an entry.
    -- .LeafCurrently same as 'URL' but intended to be 'just' the name.
    -- .IsDirBoolean for if an entry is a directory or not.
    -- .SizeSize in Bytes of the entry.
    -- .ModTimeThe UTC timestamp of an entry.
    +

    Authentication

    +

    By default this will serve files without needing a login.

    +

    You can either use an htpasswd file which can take lots of users, or set a single username and password with the --user and --pass flags.

    +

    Use --htpasswd /path/to/htpasswd to provide an htpasswd file. This is in standard apache format and supports MD5, SHA1 and BCrypt for basic authentication. Bcrypt is recommended.

    +

    To create an htpasswd file:

    +
    touch htpasswd
    +htpasswd -B htpasswd user
    +htpasswd -B htpasswd anotherUser
    +

    The password file can be updated while rclone is running.

    +

    Use --realm to set the authentication realm.

    +

    Use --salt to change the password hashing salt from the default.

    rclone rcd <path to files to serve>* [flags]

    Options

      -h, --help   help for rcd
    @@ -2377,7 +2483,7 @@ ffmpeg - | rclone rcat remote:path/to/file

    Synopsis

    Run a DLNA media server for media stored in an rclone remote. Many devices, such as the Xbox and PlayStation, can automatically discover this server in the LAN and play audio/video from it. VLC is also supported. Service discovery uses UDP multicast packets (SSDP) and will thus only work on LANs.

    Rclone will list all files present in the remote, without filtering based on media formats or file extensions. Additionally, there is no media transcoding support. This means that some players might show files that they are not able to play back correctly.

    -

    Server options

    +

    Server options

    Use --addr to specify which IP address and port the server should listen on, e.g. --addr 1.2.3.4:8000 or --addr :8080 to listen to all IPs.

    Use --name to choose the friendly server name, which is by default "rclone (hostname)".

    Use --log-trace in conjunction with -vv to enable additional debug logging of all UPNP traffic.

    @@ -2499,8 +2605,8 @@ ffmpeg - | rclone rcat remote:path/to/file
    rclone serve dlna remote:path [flags]

    Options

          --addr string                            The ip:port or :port to bind the DLNA http server to (default ":7879")
    -      --announce-interval duration             The interval between SSDP announcements (default 12m0s)
    -      --dir-cache-time duration                Time to cache directory entries for (default 5m0s)
    +      --announce-interval Duration             The interval between SSDP announcements (default 12m0s)
    +      --dir-cache-time Duration                Time to cache directory entries for (default 5m0s)
           --dir-perms FileMode                     Directory permissions (default 0777)
           --file-perms FileMode                    File permissions (default 0666)
           --gid uint32                             Override the gid field set by the filesystem (not supported on Windows) (default 1000)
    @@ -2511,24 +2617,24 @@ ffmpeg - | rclone rcat remote:path/to/file
    --no-checksum Don't compare checksums on up/download --no-modtime Don't read/write the modification time (can speed things up) --no-seek Don't allow seeking in files - --poll-interval duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) + --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --read-only Only allow read-only access --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) - --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s) + --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off) - --vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s) + --vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s) --vfs-case-insensitive If a file name not found, find a case insensitive match --vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off) --vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) - --vfs-read-wait duration Time to wait for in-sequence read before seeking (default 20ms) + --vfs-read-wait Duration Time to 
wait for in-sequence read before seeking (default 20ms) --vfs-used-is-size rclone size Use the rclone size algorithm for Used size - --vfs-write-back duration Time to writeback files after last use when using cache (default 5s) - --vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s) + --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s) + --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s)

    See the global flags page for global options not listed here.

    SEE ALSO

    -

    --s3-endpoint

    +

    --s3-endpoint

    Endpoint of the Shared Gateway.

    Properties:

    -

    --s3-endpoint

    +

    --s3-endpoint

    Endpoint for Tencent COS API.

    Properties:

    -

    --s3-endpoint

    +

    --s3-endpoint

    Endpoint for RackCorp Object Storage.

    Properties:

    -

    --s3-endpoint

    +

    --s3-endpoint

    Endpoint for Qiniu Object Storage.

    Properties:

    -

    --s3-endpoint

    +

    --s3-endpoint

    Endpoint for S3 API.

    Required when using an S3 clone.

    Properties:

    -

    --s3-storage-class

    +

    --s3-storage-class

    The storage class to use when storing new objects in Tencent COS.

    Properties:

    -

    --s3-storage-class

    +

    --s3-storage-class

    The storage class to use when storing new objects in S3.

    Properties:

    -

    --s3-storage-class

    +

    --s3-storage-class

    The storage class to use when storing new objects in Qiniu.

    Properties:

    Advanced options

    -

    Here are the Advanced options specific to s3 (Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi).

    +

    Here are the Advanced options specific to s3 (Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi).

    --s3-bucket-acl

    Canned ACL used when creating buckets.

    For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl

    Note that this ACL is applied when only when creating buckets. If it isn't set then "acl" is used instead.

    +

    If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: header is added and the default (private) will be used.

    Properties:

    +

    --s3-might-gzip

    +

    Set this if the backend might gzip objects.

    +

    Normally providers will not alter objects when they are downloaded. If an object was not uploaded with Content-Encoding: gzip then it won't be set on download.

    +

    However some providers may gzip objects even if they weren't uploaded with Content-Encoding: gzip (eg Cloudflare).

    +

    A symptom of this would be receiving errors like

    +
    ERROR corrupted on transfer: sizes differ NNN vs MMM
    +

    If you set this flag and rclone downloads an object with Content-Encoding: gzip set and chunked transfer encoding, then rclone will decompress the object on the fly.

    +

    If this is set to unset (the default) then rclone will choose according to the provider setting what to apply, but you can override rclone's choice here.

    +

    Properties:

    +

    --s3-no-system-metadata

    Suppress setting and reading of system metadata

    Properties:

    @@ -13930,7 +14153,7 @@ Option Storage. Type of storage to configure. Choose a number from below, or type in your own value. ... -XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi +XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi \ (s3) ... Storage> s3 @@ -14061,7 +14284,7 @@ Option Storage. Type of storage to configure. Choose a number from below, or type in your own value. [snip] - 5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi + 5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi \ (s3) [snip] Storage> 5 @@ -14183,7 +14406,7 @@ e/n/d/r/c/s/q> q \ "alias" 2 / Amazon Drive \ "amazon cloud drive" - 3 / Amazon S3 Complaint Storage Providers (Dreamhost, Ceph, ChinaMobile, ArvanCloud, Minio, IBM COS) + 3 / Amazon S3 Complaint Storage Providers (Dreamhost, Ceph, ChinaMobile, Liara, ArvanCloud, Minio, IBM COS) \ "s3" 4 / Backblaze B2 \ "b2" @@ -14333,7 +14556,7 @@ Option Storage. Type of storage to configure. Choose a number from below, or type in your own value. 
[snip] -XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi +XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi \ (s3) [snip] Storage> s3 @@ -14428,7 +14651,7 @@ name> ionos-fra Type of storage to configure. Choose a number from below, or type in your own value. [snip] -XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi +XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi \ (s3) [snip] Storage> s3 @@ -14618,7 +14841,7 @@ n/s/q> n \ (alias) 4 / Amazon Drive \ (amazon cloud drive) - 5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi + 5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi \ (s3) [snip] Storage> s3 @@ -14818,7 +15041,7 @@ name> remote
    Type of storage to configure.
     Choose a number from below, or type in your own value.
     [snip]
    -XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, Lyve Cloud, Minio, RackCorp, SeaweedFS, and Tencent COS
    +XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Liara, Lyve Cloud, Minio, RackCorp, SeaweedFS, and Tencent COS
        \ (s3)
     [snip]
     Storage> s3
    @@ -14943,7 +15166,7 @@ name> wasabi Type of storage to configure. Choose a number from below, or type in your own value [snip] -XX / Amazon S3 (also Dreamhost, Ceph, ChinaMobile, ArvanCloud, Minio) +XX / Amazon S3 (also Dreamhost, Ceph, ChinaMobile, ArvanCloud, Minio, Liara) \ "s3" [snip] Storage> s3 @@ -15045,7 +15268,7 @@ Type of storage to configure. Enter a string value. Press Enter for the default (""). Choose a number from below, or type in your own value [snip] - 4 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, Minio, and Tencent COS + 4 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Liara, Minio, and Tencent COS \ "s3" [snip] Storage> s3 @@ -15147,7 +15370,7 @@ Option Storage. Type of storage to configure. Choose a number from below, or type in your own value. ... - 5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, Lyve Cloud, Minio, RackCorp, SeaweedFS, and Tencent COS + 5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Lyve Cloud, Minio, RackCorp, SeaweedFS, and Tencent COS \ (s3) ... Storage> s3 @@ -15375,6 +15598,95 @@ y) Yes this is OK (default) e) Edit this remote d) Delete this remote y/e/d> y +

    Liara

    +

    Here is an example of making a Liara Object Storage configuration. First run:

    +
    rclone config
    +

    This will guide you through an interactive setup process.

    +
    No remotes found, make a new one?
    +n) New remote
    +s) Set configuration password
    +n/s> n
    +name> Liara
    +Type of storage to configure.
    +Choose a number from below, or type in your own value
    +[snip]
    +XX / Amazon S3 (also Dreamhost, Ceph, ChinaMobile, ArvanCloud, Liara, Minio)
    +   \ "s3"
    +[snip]
    +Storage> s3
    +Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). Only applies if access_key_id and secret_access_key is blank.
    +Choose a number from below, or type in your own value
    + 1 / Enter AWS credentials in the next step
    +   \ "false"
    + 2 / Get AWS credentials from the environment (env vars or IAM)
    +   \ "true"
    +env_auth> 1
    +AWS Access Key ID - leave blank for anonymous access or runtime credentials.
    +access_key_id> YOURACCESSKEY
    +AWS Secret Access Key (password) - leave blank for anonymous access or runtime credentials.
    +secret_access_key> YOURSECRETACCESSKEY
    +Region to connect to.
    +Choose a number from below, or type in your own value
    +   / The default endpoint
    + 1 | US Region, Northern Virginia, or Pacific Northwest.
    +   | Leave location constraint empty.
    +   \ "us-east-1"
    +[snip]
    +region>
    +Endpoint for S3 API.
    +Leave blank if using Liara to use the default endpoint for the region.
    +Specify if using an S3 clone such as Ceph.
    +endpoint> storage.iran.liara.space
    +Canned ACL used when creating buckets and/or storing objects in S3.
    +For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
    +Choose a number from below, or type in your own value
    + 1 / Owner gets FULL_CONTROL. No one else has access rights (default).
    +   \ "private"
    +[snip]
    +acl>
    +The server-side encryption algorithm used when storing this object in S3.
    +Choose a number from below, or type in your own value
    + 1 / None
    +   \ ""
    + 2 / AES256
    +   \ "AES256"
    +server_side_encryption>
    +The storage class to use when storing objects in S3.
    +Choose a number from below, or type in your own value
    + 1 / Default
    +   \ ""
    + 2 / Standard storage class
    +   \ "STANDARD"
    +storage_class>
    +Remote config
    +--------------------
    +[Liara]
    +env_auth = false
    +access_key_id = YOURACCESSKEY
    +secret_access_key = YOURSECRETACCESSKEY
    +endpoint = storage.iran.liara.space
    +location_constraint =
    +acl =
    +server_side_encryption =
    +storage_class =
    +--------------------
    +y) Yes this is OK
    +e) Edit this remote
    +d) Delete this remote
    +y/e/d> y
    +

    This will leave the config file looking like this.

    +
    [Liara]
    +type = s3
    +provider = Liara
    +env_auth = false
    +access_key_id = YOURACCESSKEY
    +secret_access_key = YOURSECRETACCESSKEY
    +region =
    +endpoint = storage.iran.liara.space
    +location_constraint =
    +acl =
    +server_side_encryption =
    +storage_class =

    ArvanCloud

    ArvanCloud ArvanCloud Object Storage goes beyond the limited traditional file storage. It gives you access to backup and archived files and allows sharing. Files like profile images in the app, images sent by users or scanned documents can be stored securely and easily in our Object Storage service.

    ArvanCloud provides an S3 interface which can be configured for use with rclone like this.

    @@ -15386,7 +15698,7 @@ name> ArvanCloud Type of storage to configure. Choose a number from below, or type in your own value [snip] -XX / Amazon S3 (also Dreamhost, Ceph, ChinaMobile, ArvanCloud, Minio) +XX / Amazon S3 (also Dreamhost, Ceph, ChinaMobile, ArvanCloud, Liara, Minio) \ "s3" [snip] Storage> s3 @@ -15496,7 +15808,7 @@ n/s/q> n \ "alias" 3 / Amazon Drive \ "amazon cloud drive" - 4 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, Minio, and Tencent COS + 4 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Liara, Minio, and Tencent COS \ "s3" [snip] Storage> s3 @@ -16031,9 +16343,10 @@ Choose a number from below, or type in your own value \ "enterprise" box_sub_type> Remote config -Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine +Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access +If not sure try Y. If Y failed, try N. y) Yes n) No y/n> y @@ -16116,9 +16429,10 @@ Already have a token - refresh? y) Yes n) No y/n> y -Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine +Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access +If not sure try Y. If Y failed, try N. y) Yes n) No y/n> y @@ -16337,9 +16651,10 @@ y/e/d> y

    Note that Box is case insensitive so you can't have a file called "Hello.doc" and one called "hello.doc".

    Box file names can't have the \ character in. rclone maps this to and from an identical looking unicode equivalent (U+FF3C Fullwidth Reverse Solidus).

    Box only supports filenames up to 255 characters in length.

    +

    Box has API rate limits that sometimes reduce the speed of rclone.

    rclone about is not supported by the Box backend. Backends without this capability cannot determine free space for an rclone mount or use policy mfs (most free space) as a member of an rclone union remote.

    See List of backends that do not support rclone about and rclone about

    -

    Cache (DEPRECATED)

    +

    Cache

    The cache remote wraps another existing remote and stores file structure and its data for long running tasks like rclone mount.

    Status

    The cache backend code is working but it currently doesn't have a maintainer so there are outstanding bugs which aren't getting fixed.

    @@ -16773,7 +17088,7 @@ chunk_total_size = 10G

    stats

    Print stats on the cache backend in JSON format.

    rclone backend stats remote: [options] [<arguments>+]
    -

    Chunker (BETA)

    +

    Chunker

    The chunker overlay transparently splits large files into smaller chunks during upload to wrapped remote and transparently assembles them back when the file is downloaded. This allows you to effectively overcome size limits imposed by storage providers.

    Configuration

    To use it, first set up the underlying remote following the configuration instructions for that remote. You can also use a local pathname instead of a remote.

    @@ -17091,9 +17406,10 @@ y) Yes n) No y/n> n Remote config -Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine +Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access +If not sure try Y. If Y failed, try N. y) Yes n) No y/n> y @@ -17724,7 +18040,7 @@ rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile -

    Compress (Experimental)

    +

    Compress

    Warning

    This remote is currently experimental. Things may break and data may be lost. Anything you do with this remote is at your own risk. Please understand the risks associated with using experimental code and don't use this remote in critical applications.

    The Compress remote adds compression to another remote. It is best used with remotes containing many large compressible files.

    @@ -17972,6 +18288,8 @@ y) Yes this is OK e) Edit this remote d) Delete this remote y/e/d> y +

    See the remote setup docs for how to set it up on a machine with no Internet browser available.

    +

    Note that rclone runs a webserver on your local machine to collect the token as returned from Dropbox. This only runs from the moment it opens your browser to the moment you get back the verification code. This is on http://127.0.0.1:53682/ and it may require you to unblock it temporarily if you are running a host firewall, or use manual mode.

    You can then use it like this,

    List directories in top level of your dropbox

    rclone lsd remote:
    @@ -18595,7 +18913,7 @@ rclone lsf :ftp,host=speedtest.tele2.net,user=anonymous,pass=IXs2wc8OJOz7SYLBk47

    --ftp-tls

    Use Implicit FTPS (FTP over TLS).

    -

    When using implicit FTP over TLS the client connects using TLS right from the start which breaks compatibility with non-TLS-aware servers. This is usually served over port 990 rather than port 21. Cannot be used in combination with explicit FTP.

    +

    When using implicit FTP over TLS the client connects using TLS right from the start which breaks compatibility with non-TLS-aware servers. This is usually served over port 990 rather than port 21. Cannot be used in combination with explicit FTPS.

    Properties:

    --ftp-explicit-tls

    Use Explicit FTPS (FTP over TLS).

    -

    When using explicit FTP over TLS the client explicitly requests security from the server in order to upgrade a plain text connection to an encrypted one. Cannot be used in combination with implicit FTP.

    +

    When using explicit FTP over TLS the client explicitly requests security from the server in order to upgrade a plain text connection to an encrypted one. Cannot be used in combination with implicit FTPS.

    Properties:

    Configuration

    -

    Here is an example of making a mailru configuration. First create a Mail.ru Cloud account and choose a tariff, then run

    +

    Here is an example of making a mailru configuration.

    +

    First create a Mail.ru Cloud account and choose a tariff.

    +

    You will need to log in and create an app password for rclone. Rclone will not work with your normal username and password - it will give an error like oauth2: server response missing access_token.

    + +

    Now run

    rclone config

    This will guide you through an interactive setup process:

    No remotes found, make a new one?
    @@ -22480,6 +22822,10 @@ User name (usually email)
     Enter a string value. Press Enter for the default ("").
     user> username@mail.ru
     Password
    +
    +This must be an app password - rclone will not work with your normal
    +password. See the Configuration section in the docs for how to make an
    +app password.
     y) Yes type in my own password
     g) Generate random password
     y/g> y
    @@ -22598,6 +22944,7 @@ y/e/d> y

    --mailru-pass

    Password.

    +

    This must be an app password - rclone will not work with your normal password. See the Configuration section in the docs for how to make an app password.

    NB Input to this must be obscured - see rclone obscure.

    Properties:

    -

    --azureblob-use-emulator

    -

    Uses local storage emulator if provided as 'true'.

    -

    Leave blank if using real azure storage endpoint.

    -

    Properties:

    - -

    Advanced options

    -

    Here are the Advanced options specific to azureblob (Microsoft Azure Blob Storage).

    --azureblob-msi-object-id

    Object ID of the user-assigned MSI to use, if any.

    Leave blank if msi_client_id or msi_mi_res_id specified.

    @@ -23352,6 +23866,16 @@ container/
  • Type: string
  • Required: false
  • +

    --azureblob-use-emulator

    +

    Uses local storage emulator if provided as 'true'.

    +

    Leave blank if using real azure storage endpoint.

    +

    Properties:

    +

    --azureblob-endpoint

    Endpoint for the service.

    Leave blank normally.

    @@ -23491,6 +24015,16 @@ container/ +

    --azureblob-no-check-container

    +

    If set, don't attempt to check the container exists or create it.

    +

    This can be useful when trying to minimise the number of transactions rclone does if you know the container exists already.

    +

    Properties:

    +

    --azureblob-no-head-object

    If set, do not do HEAD before GET when getting objects.

    Properties:

    @@ -23500,14 +24034,24 @@ container/
  • Type: bool
  • Default: false
  • +

    Custom upload headers

    +

    You can set custom upload headers with the --header-upload flag.

    + +

    Eg --header-upload "Content-Type: text/potato"

    Limitations

    MD5 sums are only uploaded with chunked files if the source has an MD5 sum. This will always be the case for a local to azure copy.

    rclone about is not supported by the Microsoft Azure Blob storage backend. Backends without this capability cannot determine free space for an rclone mount or use policy mfs (most free space) as a member of an rclone union remote.

    See List of backends that do not support rclone about and rclone about

    Azure Storage Emulator Support

    -

    You can run rclone with storage emulator (usually azurite).

    -

    To do this, just set up a new remote with rclone config following instructions described in introduction and set use_emulator config as true. You do not need to provide default account name neither an account key.

    -

    Also, if you want to access a storage emulator instance running on a different machine, you can override Endpoint parameter in advanced settings, setting it to http(s)://<host>:<port>/devstoreaccount1 (e.g. http://10.254.2.5:10000/devstoreaccount1).

    +

    You can run rclone with the storage emulator (usually azurite).

    +

    To do this, just set up a new remote with rclone config following the instructions in the introduction and set use_emulator in the advanced settings as true. You do not need to provide a default account name nor an account key. But you can override them in the account and key options. (Prior to v1.61 they were hard coded to azurite's devstoreaccount1.)

    +

    Also, if you want to access a storage emulator instance running on a different machine, you can override the endpoint parameter in the advanced settings, setting it to http(s)://<host>:<port>/devstoreaccount1 (e.g. http://10.254.2.5:10000/devstoreaccount1).

    Microsoft OneDrive

    Paths are specified as remote:path

    Paths may be as deep as required, e.g. remote:directory/subdirectory.

    @@ -23546,9 +24090,10 @@ y) Yes n) No y/n> n Remote config -Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine +Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access +If not sure try Y. If Y failed, try N. y) Yes n) No y/n> y @@ -24059,6 +24604,29 @@ Description: Due to a configuration change made by your administrator, or becaus

    If you see the error above after enabling multi-factor authentication for your account, you can fix it by refreshing your OAuth refresh token. To do that, run rclone config, and choose to edit your OneDrive backend. Then, you don't need to actually make any changes until you reach this question: Already have a token - refresh?. For this question, answer y and go through the process to refresh your token, just like the first time the backend is configured. After this, rclone should work again for this backend.

    On Sharepoint and OneDrive for Business, rclone link may return an "Invalid request" error. A possible cause is that the organisation admin didn't allow public links to be made for the organisation/sharepoint library. To fix the permissions as an admin, take a look at the docs: 1, 2.

    +

    Can not access Shared with me files

    +

    Shared with me files are not currently supported by rclone, but there is a workaround:

    +
      +
    1. Visit https://onedrive.live.com

    2. +
    3. Right click an item in Shared, then click Add shortcut to My files in the context

      +
      +

      Screenshot (Shared with me)

      +
      +
      make_shortcut
      +
      +
    4. +
    5. The shortcut will appear in My files; you can access it with rclone, and it behaves like a normal folder/file.

      +
      +

      Screenshot (My Files)

      +
      +
      in_my_files
      +
      +
    6. +
    +
    +

    Screenshot (rclone mount)

    +rclone_mount +

    OpenDrive

    Paths are specified as remote:path

    Paths may be as deep as required, e.g. remote:directory/subdirectory.

    @@ -24707,7 +25275,7 @@ y/e/d> y

    Note that incomplete multipart uploads older than 24 hours can be removed with rclone cleanup remote:bucket just for one bucket, or rclone cleanup remote: for all buckets. QingStor does not ever remove incomplete multipart uploads so it may be necessary to run this from time to time.

    Buckets and Zone

    With QingStor you can list buckets (rclone lsd) using any zone, but you can only access the content of a bucket from the zone it was created in. If you attempt to access a bucket from the wrong zone, you will get an error, incorrect zone, the bucket is not in 'XXX' zone.

    -

    Authentication

    +

    Authentication

    There are two ways to supply rclone with a set of QingStor credentials. In order of precedence:

    Those algorithms are insecure and may allow plaintext data to be recovered by an attacker.

    +

    This must be false if you use either ciphers or key_exchange advanced options.

    Properties:

    +

    --sftp-ciphers

    +

    Space separated list of ciphers to be used for session encryption, ordered by preference.

    +

    At least one must match with server configuration. This can be checked for example using ssh -Q cipher.

    +

    This must not be set if use_insecure_cipher is true.

    +

    Example:

    +
    aes128-ctr aes192-ctr aes256-ctr aes128-gcm@openssh.com aes256-gcm@openssh.com
    +

    Properties:

    + +

    --sftp-key-exchange

    +

    Space separated list of key exchange algorithms, ordered by preference.

    +

    At least one must match with server configuration. This can be checked for example using ssh -Q kex.

    +

    This must not be set if use_insecure_cipher is true.

    +

    Example:

    +
    sntrup761x25519-sha512@openssh.com curve25519-sha256 curve25519-sha256@libssh.org ecdh-sha2-nistp256
    +

    Properties:

    + +

    --sftp-macs

    +

    Space separated list of MACs (message authentication code) algorithms, ordered by preference.

    +

    At least one must match with server configuration. This can be checked for example using ssh -Q mac.

    +

    Example:

    +
    umac-64-etm@openssh.com umac-128-etm@openssh.com hmac-sha2-256-etm@openssh.com
    +

    Properties:

    +

    Limitations

    On some SFTP servers (e.g. Synology) the paths are different for SSH and SFTP so the hashes can't be calculated properly. For them using disable_hashcheck is a good idea.

    The only ssh agent supported under Windows is Putty's pageant.

    @@ -28029,9 +28640,10 @@ client_id> Yandex Client Secret - leave blank normally. client_secret> Remote config -Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine +Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access +If not sure try Y. If Y failed, try N. y) Yes n) No y/n> y @@ -28182,9 +28794,10 @@ y) Yes n) No (default) y/n> n Remote config -Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine +Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access +If not sure try Y. If Y failed, try N. y) Yes (default) n) No y/n> @@ -28967,6 +29580,193 @@ $ tree /tmp/b
  • "error": return an error based on option value
  • Changelog

    +

    v1.61.0 - 2022-12-20

    +

    See commits

    + +

    v1.60.1 - 2022-11-17

    +

    See commits

    +

    v1.60.0 - 2022-10-21

    See commits

    Contact the rclone project

    Forum

    diff --git a/MANUAL.md b/MANUAL.md index c7030525b..59c2cef30 100644 --- a/MANUAL.md +++ b/MANUAL.md @@ -1,6 +1,6 @@ % rclone(1) User Manual % Nick Craig-Wood -% Oct 21, 2022 +% Dec 20, 2022 # Rclone syncs your files to cloud storage @@ -132,6 +132,7 @@ WebDAV or S3, that work out of the box.) - IDrive e2 - IONOS Cloud - Koofr +- Liara Object Storage - Mail.ru Cloud - Memset Memstore - Mega @@ -315,9 +316,9 @@ The simplest fix is to run Fetch the correct binary for your processor type by clicking on these links. If not sure, use the first link. -- [Intel/AMD - 64 Bit](https://downloads.rclone.org/rclone-current-linux-amd64.zip) -- [Intel/AMD - 32 Bit](https://downloads.rclone.org/rclone-current-linux-386.zip) -- [ARM - 64 Bit](https://downloads.rclone.org/rclone-current-linux-arm64.zip) +- [Intel/AMD - 64 Bit](https://downloads.rclone.org/rclone-current-windows-amd64.zip) +- [Intel/AMD - 32 Bit](https://downloads.rclone.org/rclone-current-windows-386.zip) +- [ARM - 64 Bit](https://downloads.rclone.org/rclone-current-windows-arm64.zip) Open this file in the Explorer and extract `rclone.exe`. Rclone is a portable executable so you can place it wherever is convenient. @@ -2365,7 +2366,7 @@ This will look something like (some irrelevant detail removed): "State": "*oauth-islocal,teamdrive,,", "Option": { "Name": "config_is_local", - "Help": "Use auto config?\n * Say Y if not sure\n * Say N if you are working on a remote or headless machine\n", + "Help": "Use web browser to automatically authenticate rclone with remote?\n * Say Y if the machine running rclone has a web browser you can use\n * Say N if running rclone on a (remote) machine without web browser access\nIf not sure try Y. 
If Y failed, try N.\n", "Default": true, "Examples": [ { @@ -2760,7 +2761,7 @@ This will look something like (some irrelevant detail removed): "State": "*oauth-islocal,teamdrive,,", "Option": { "Name": "config_is_local", - "Help": "Use auto config?\n * Say Y if not sure\n * Say N if you are working on a remote or headless machine\n", + "Help": "Use web browser to automatically authenticate rclone with remote?\n * Say Y if the machine running rclone has a web browser you can use\n * Say N if running rclone on a (remote) machine without web browser access\nIf not sure try Y. If Y failed, try N.\n", "Default": true, "Examples": [ { @@ -3689,6 +3690,7 @@ rclone lsjson remote:path [flags] --hash Include hashes in the output (may take longer) --hash-type stringArray Show only this hash type (may be repeated) -h, --help help for lsjson + -M, --metadata Add metadata to the listing --no-mimetype Don't read the mime type (can speed things up) --no-modtime Don't read the modification time (can speed things up) --original Show the ID of the underlying Object @@ -4419,14 +4421,14 @@ rclone mount remote:path /path/to/mountpoint [flags] --allow-other Allow access to other users (not supported on Windows) --allow-root Allow access to root user (not supported on Windows) --async-read Use asynchronous reads (not supported on Windows) (default true) - --attr-timeout duration Time for which file/directory attributes are cached (default 1s) + --attr-timeout Duration Time for which file/directory attributes are cached (default 1s) --daemon Run mount in background and exit parent process (as background output is suppressed, use --log-file with --log-format=pid,... 
to monitor) (not supported on Windows) - --daemon-timeout duration Time limit for rclone to respond to kernel (not supported on Windows) - --daemon-wait duration Time to wait for ready mount from daemon (maximum time on Linux, constant sleep time on OSX/BSD) (not supported on Windows) (default 1m0s) + --daemon-timeout Duration Time limit for rclone to respond to kernel (not supported on Windows) (default 0s) + --daemon-wait Duration Time to wait for ready mount from daemon (maximum time on Linux, constant sleep time on OSX/BSD) (not supported on Windows) (default 1m0s) --debug-fuse Debug the FUSE internals - needs -v --default-permissions Makes kernel enforce access control based on the file mode (not supported on Windows) --devname string Set the device name - default is remote:path - --dir-cache-time duration Time to cache directory entries for (default 5m0s) + --dir-cache-time Duration Time to cache directory entries for (default 5m0s) --dir-perms FileMode Directory permissions (default 0777) --file-perms FileMode File permissions (default 0666) --fuse-flag stringArray Flags or arguments to be passed direct to libfuse/WinFsp (repeat if required) @@ -4440,24 +4442,24 @@ rclone mount remote:path /path/to/mountpoint [flags] --noappledouble Ignore Apple Double (._) and .DS_Store files (supported on OSX only) (default true) --noapplexattr Ignore all "com.apple.*" extended attributes (supported on OSX only) -o, --option stringArray Option for libfuse/WinFsp (repeat if required) - --poll-interval duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) + --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --read-only Only allow read-only access --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) --umask int Override 
the permission bits set by the filesystem (not supported on Windows) (default 2) - --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s) + --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off) - --vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s) + --vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s) --vfs-case-insensitive If a file name not found, find a case insensitive match --vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off) --vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) - --vfs-read-wait duration Time to wait for in-sequence read before seeking (default 20ms) + --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-used-is-size rclone size Use the rclone size algorithm for Used size - --vfs-write-back duration Time to writeback files after last use when using cache (default 5s) - --vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s) + --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s) + --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) --volname string Set the volume name (supported on Windows and OSX only) --write-back-cache Makes kernel buffer writes before sending them to rclone 
(without this, writethrough caching is used) (not supported on Windows) ``` @@ -4547,11 +4549,12 @@ press '?' to toggle the help on and off. The supported keys are: ↑,↓ or k,j to Move →,l to enter ←,h to return - c toggle counts g toggle graph + c toggle counts a toggle average size in directory + m toggle modified time u toggle human-readable format - n,s,C,A sort by name,size,count,average size + n,s,C,A,M sort by name,size,count,asize,mtime d delete file/directory v select file/directory V enter visual select mode @@ -4803,6 +4806,101 @@ the browser when rclone is run. See the [rc documentation](https://rclone.org/rc/) for more info on the rc flags. +## Server options + +Use `--addr` to specify which IP address and port the server should +listen on, eg `--addr 1.2.3.4:8000` or `--addr :8080` to listen to all +IPs. By default it only listens on localhost. You can use port +:0 to let the OS choose an available port. + +If you set `--addr` to listen on a public or LAN accessible IP address +then using Authentication is advised - see the next section for info. + +You can use a unix socket by setting the url to `unix:///path/to/socket` +or just by using an absolute path name. Note that unix sockets bypass the +authentication - this is expected to be done with file system permissions. + +`--addr` may be repeated to listen on multiple IPs/ports/sockets. + +`--server-read-timeout` and `--server-write-timeout` can be used to +control the timeouts on the server. Note that this is the total time +for a transfer. + +`--max-header-bytes` controls the maximum number of bytes the server will +accept in the HTTP header. + +`--baseurl` controls the URL prefix that rclone serves from. By default +rclone will serve from the root. If you used `--baseurl "/rclone"` then +rclone would serve from a URL starting with "/rclone/". This is +useful if you wish to proxy rclone serve. 
Rclone automatically +inserts leading and trailing "/" on `--baseurl`, so `--baseurl "rclone"`, +`--baseurl "/rclone"` and `--baseurl "/rclone/"` are all treated +identically. + +### TLS (SSL) + +By default this will serve over http. If you want you can serve over +https. You will need to supply the `--cert` and `--key` flags. +If you wish to do client side certificate validation then you will need to +supply `--client-ca` also. + +`--cert` should be a either a PEM encoded certificate or a concatenation +of that with the CA certificate. `--key` should be the PEM encoded +private key and `--client-ca` should be the PEM encoded client +certificate authority certificate. + +--min-tls-version is minimum TLS version that is acceptable. Valid + values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default + "tls1.0"). + +### Template + +`--template` allows a user to specify a custom markup template for HTTP +and WebDAV serve functions. The server exports the following markup +to be used within the template to server pages: + +| Parameter | Description | +| :---------- | :---------- | +| .Name | The full path of a file/directory. | +| .Title | Directory listing of .Name | +| .Sort | The current sort used. This is changeable via ?sort= parameter | +| | Sort Options: namedirfirst,name,size,time (default namedirfirst) | +| .Order | The current ordering used. This is changeable via ?order= parameter | +| | Order Options: asc,desc (default asc) | +| .Query | Currently unused. | +| .Breadcrumb | Allows for creating a relative navigation | +|-- .Link | The relative to the root link of the Text. | +|-- .Text | The Name of the directory. | +| .Entries | Information about a specific file/directory. | +|-- .URL | The 'url' of an entry. | +|-- .Leaf | Currently same as 'URL' but intended to be 'just' the name. | +|-- .IsDir | Boolean for if an entry is a directory or not. | +|-- .Size | Size in Bytes of the entry. | +|-- .ModTime | The UTC timestamp of an entry. 
| + +### Authentication + +By default this will serve files without needing a login. + +You can either use an htpasswd file which can take lots of users, or +set a single username and password with the `--user` and `--pass` flags. + +Use `--htpasswd /path/to/htpasswd` to provide an htpasswd file. This is +in standard apache format and supports MD5, SHA1 and BCrypt for basic +authentication. Bcrypt is recommended. + +To create an htpasswd file: + + touch htpasswd + htpasswd -B htpasswd user + htpasswd -B htpasswd anotherUser + +The password file can be updated while rclone is running. + +Use `--realm` to set the authentication realm. + +Use `--salt` to change the password hashing salt from the default. + ``` rclone rcd * [flags] @@ -5332,8 +5430,8 @@ rclone serve dlna remote:path [flags] ``` --addr string The ip:port or :port to bind the DLNA http server to (default ":7879") - --announce-interval duration The interval between SSDP announcements (default 12m0s) - --dir-cache-time duration Time to cache directory entries for (default 5m0s) + --announce-interval Duration The interval between SSDP announcements (default 12m0s) + --dir-cache-time Duration Time to cache directory entries for (default 5m0s) --dir-perms FileMode Directory permissions (default 0777) --file-perms FileMode File permissions (default 0666) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) @@ -5344,24 +5442,24 @@ rclone serve dlna remote:path [flags] --no-checksum Don't compare checksums on up/download --no-modtime Don't read/write the modification time (can speed things up) --no-seek Don't allow seeking in files - --poll-interval duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) + --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) 
--read-only Only allow read-only access --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) - --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s) + --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off) - --vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s) + --vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s) --vfs-case-insensitive If a file name not found, find a case insensitive match --vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off) --vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) - --vfs-read-wait duration Time to wait for in-sequence read before seeking (default 20ms) + --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-used-is-size rclone size Use the rclone size algorithm for Used size - --vfs-write-back duration Time to writeback files after last use when using cache (default 5s) - --vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s) + --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s) + --vfs-write-wait Duration Time to wait for in-sequence write before giving error 
(default 1s) ``` See the [global flags page](https://rclone.org/flags/) for global options not listed here. @@ -5746,15 +5844,15 @@ rclone serve docker [flags] --allow-other Allow access to other users (not supported on Windows) --allow-root Allow access to root user (not supported on Windows) --async-read Use asynchronous reads (not supported on Windows) (default true) - --attr-timeout duration Time for which file/directory attributes are cached (default 1s) + --attr-timeout Duration Time for which file/directory attributes are cached (default 1s) --base-dir string Base directory for volumes (default "/var/lib/docker-volumes/rclone") --daemon Run mount in background and exit parent process (as background output is suppressed, use --log-file with --log-format=pid,... to monitor) (not supported on Windows) - --daemon-timeout duration Time limit for rclone to respond to kernel (not supported on Windows) - --daemon-wait duration Time to wait for ready mount from daemon (maximum time on Linux, constant sleep time on OSX/BSD) (not supported on Windows) (default 1m0s) + --daemon-timeout Duration Time limit for rclone to respond to kernel (not supported on Windows) (default 0s) + --daemon-wait Duration Time to wait for ready mount from daemon (maximum time on Linux, constant sleep time on OSX/BSD) (not supported on Windows) (default 1m0s) --debug-fuse Debug the FUSE internals - needs -v --default-permissions Makes kernel enforce access control based on the file mode (not supported on Windows) --devname string Set the device name - default is remote:path - --dir-cache-time duration Time to cache directory entries for (default 5m0s) + --dir-cache-time Duration Time to cache directory entries for (default 5m0s) --dir-perms FileMode Directory permissions (default 0777) --file-perms FileMode File permissions (default 0666) --forget-state Skip restoring previous state @@ -5770,26 +5868,26 @@ rclone serve docker [flags] --noappledouble Ignore Apple Double (._) and .DS_Store 
files (supported on OSX only) (default true) --noapplexattr Ignore all "com.apple.*" extended attributes (supported on OSX only) -o, --option stringArray Option for libfuse/WinFsp (repeat if required) - --poll-interval duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) + --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --read-only Only allow read-only access --socket-addr string Address or absolute path (default: /run/docker/plugins/rclone.sock) --socket-gid int GID for unix socket (default: current process GID) (default 1000) --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) - --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s) + --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off) - --vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s) + --vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s) --vfs-case-insensitive If a file name not found, find a case insensitive match --vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off) --vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each 
chunk read, until the limit is reached ('off' is unlimited) (default off) - --vfs-read-wait duration Time to wait for in-sequence read before seeking (default 20ms) + --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-used-is-size rclone size Use the rclone size algorithm for Used size - --vfs-write-back duration Time to writeback files after last use when using cache (default 5s) - --vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s) + --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s) + --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) --volname string Set the volume name (supported on Windows and OSX only) --write-back-cache Makes kernel buffer writes before sending them to rclone (without this, writethrough caching is used) (not supported on Windows) ``` @@ -6237,7 +6335,7 @@ rclone serve ftp remote:path [flags] --addr string IPaddress:Port or :Port to bind server to (default "localhost:2121") --auth-proxy string A program to use to create the backend from the auth --cert string TLS PEM key (concatenation of certificate and CA certificate) - --dir-cache-time duration Time to cache directory entries for (default 5m0s) + --dir-cache-time Duration Time to cache directory entries for (default 5m0s) --dir-perms FileMode Directory permissions (default 0777) --file-perms FileMode File permissions (default 0666) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) @@ -6248,26 +6346,26 @@ rclone serve ftp remote:path [flags] --no-seek Don't allow seeking in files --pass string Password for authentication (empty value allow every password) --passive-port string Passive port range to use (default "30000-32000") - --poll-interval duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to 
disable) (default 1m0s) + --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --public-ip string Public IP address to advertise for passive connections --read-only Only allow read-only access --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) --user string User name for authentication (default "anonymous") - --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s) + --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off) - --vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s) + --vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s) --vfs-case-insensitive If a file name not found, find a case insensitive match --vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off) --vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) - --vfs-read-wait duration Time to wait for in-sequence read before seeking (default 20ms) + --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-used-is-size rclone size Use the rclone size algorithm for Used size - --vfs-write-back duration Time to writeback 
files after last use when using cache (default 5s) - --vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s) + --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s) + --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) ``` See the [global flags page](https://rclone.org/flags/) for global options not listed here. @@ -6304,6 +6402,12 @@ IPs. By default it only listens on localhost. You can use port If you set `--addr` to listen on a public or LAN accessible IP address then using Authentication is advised - see the next section for info. +You can use a unix socket by setting the url to `unix:///path/to/socket` +or just by using an absolute path name. Note that unix sockets bypass the +authentication - this is expected to be done with file system permissions. + +`--addr` may be repeated to listen on multiple IPs/ports/sockets. + `--server-read-timeout` and `--server-write-timeout` can be used to control the timeouts on the server. Note that this is the total time for a transfer. @@ -6319,7 +6423,7 @@ inserts leading and trailing "/" on `--baseurl`, so `--baseurl "rclone"`, `--baseurl "/rclone"` and `--baseurl "/rclone/"` are all treated identically. -### SSL/TLS +### TLS (SSL) By default this will serve over http. If you want you can serve over https. You will need to supply the `--cert` and `--key` flags. 
@@ -6709,47 +6813,47 @@ rclone serve http remote:path [flags] ## Options ``` - --addr string IPaddress:Port or :Port to bind server to (default "127.0.0.1:8080") + --addr stringArray IPaddress:Port or :Port to bind server to (default [127.0.0.1:8080]) --baseurl string Prefix for URLs - leave blank for root - --cert string SSL PEM key (concatenation of certificate and CA certificate) + --cert string TLS PEM key (concatenation of certificate and CA certificate) --client-ca string Client certificate authority to verify clients with - --dir-cache-time duration Time to cache directory entries for (default 5m0s) + --dir-cache-time Duration Time to cache directory entries for (default 5m0s) --dir-perms FileMode Directory permissions (default 0777) --file-perms FileMode File permissions (default 0666) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) -h, --help help for http --htpasswd string A htpasswd file - if not provided no authentication is done - --key string SSL PEM Private key + --key string TLS PEM Private key --max-header-bytes int Maximum size of request header (default 4096) --min-tls-version string Minimum TLS version that is acceptable (default "tls1.0") --no-checksum Don't compare checksums on up/download --no-modtime Don't read/write the modification time (can speed things up) --no-seek Don't allow seeking in files --pass string Password for authentication - --poll-interval duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) + --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --read-only Only allow read-only access --realm string Realm for authentication --salt string Password hashing salt (default "dlPL2MqE") - --server-read-timeout duration Timeout for server reading data (default 1h0m0s) - 
--server-write-timeout duration Timeout for server writing data (default 1h0m0s) + --server-read-timeout Duration Timeout for server reading data (default 1h0m0s) + --server-write-timeout Duration Timeout for server writing data (default 1h0m0s) --template string User-specified template --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) --user string User name for authentication - --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s) + --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off) - --vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s) + --vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s) --vfs-case-insensitive If a file name not found, find a case insensitive match --vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off) --vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) - --vfs-read-wait duration Time to wait for in-sequence read before seeking (default 20ms) + --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-used-is-size rclone size Use the rclone size algorithm for Used size - --vfs-write-back duration Time to writeback files after last use when using cache 
(default 5s) - --vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s) + --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s) + --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) ``` See the [global flags page](https://rclone.org/flags/) for global options not listed here. @@ -6764,7 +6868,7 @@ Serve the remote for restic's REST API. ## Synopsis -Run a basic web server to serve a remove over restic's REST backend +Run a basic web server to serve a remote over restic's REST backend API over HTTP. This allows restic to use rclone as a data storage mechanism for cloud providers that restic does not support directly. @@ -6849,13 +6953,19 @@ with a path of `//`. ## Server options Use `--addr` to specify which IP address and port the server should -listen on, e.g. `--addr 1.2.3.4:8000` or `--addr :8080` to -listen to all IPs. By default it only listens on localhost. You can use port +listen on, eg `--addr 1.2.3.4:8000` or `--addr :8080` to listen to all +IPs. By default it only listens on localhost. You can use port :0 to let the OS choose an available port. If you set `--addr` to listen on a public or LAN accessible IP address then using Authentication is advised - see the next section for info. +You can use a unix socket by setting the url to `unix:///path/to/socket` +or just by using an absolute path name. Note that unix sockets bypass the +authentication - this is expected to be done with file system permissions. + +`--addr` may be repeated to listen on multiple IPs/ports/sockets. + `--server-read-timeout` and `--server-write-timeout` can be used to control the timeouts on the server. Note that this is the total time for a transfer. @@ -6871,28 +6981,21 @@ inserts leading and trailing "/" on `--baseurl`, so `--baseurl "rclone"`, `--baseurl "/rclone"` and `--baseurl "/rclone/"` are all treated identically. 
-`--template` allows a user to specify a custom markup template for HTTP -and WebDAV serve functions. The server exports the following markup -to be used within the template to server pages: +### TLS (SSL) -| Parameter | Description | -| :---------- | :---------- | -| .Name | The full path of a file/directory. | -| .Title | Directory listing of .Name | -| .Sort | The current sort used. This is changeable via ?sort= parameter | -| | Sort Options: namedirfirst,name,size,time (default namedirfirst) | -| .Order | The current ordering used. This is changeable via ?order= parameter | -| | Order Options: asc,desc (default asc) | -| .Query | Currently unused. | -| .Breadcrumb | Allows for creating a relative navigation | -|-- .Link | The relative to the root link of the Text. | -|-- .Text | The Name of the directory. | -| .Entries | Information about a specific file/directory. | -|-- .URL | The 'url' of an entry. | -|-- .Leaf | Currently same as 'URL' but intended to be 'just' the name. | -|-- .IsDir | Boolean for if an entry is a directory or not. | -|-- .Size | Size in Bytes of the entry. | -|-- .ModTime | The UTC timestamp of an entry. | +By default this will serve over http. If you want you can serve over +https. You will need to supply the `--cert` and `--key` flags. +If you wish to do client side certificate validation then you will need to +supply `--client-ca` also. + +`--cert` should be a either a PEM encoded certificate or a concatenation +of that with the CA certificate. `--key` should be the PEM encoded +private key and `--client-ca` should be the PEM encoded client +certificate authority certificate. + +--min-tls-version is minimum TLS version that is acceptable. Valid + values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default + "tls1.0"). ### Authentication @@ -6915,21 +7018,7 @@ The password file can be updated while rclone is running. Use `--realm` to set the authentication realm. -### SSL/TLS - -By default this will serve over HTTP. 
If you want you can serve over -HTTPS. You will need to supply the `--cert` and `--key` flags. -If you wish to do client side certificate validation then you will need to -supply `--client-ca` also. - -`--cert` should be either a PEM encoded certificate or a concatenation -of that with the CA certificate. `--key` should be the PEM encoded -private key and `--client-ca` should be the PEM encoded client -certificate authority certificate. - ---min-tls-version is minimum TLS version that is acceptable. Valid - values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default - "tls1.0"). +Use `--salt` to change the password hashing salt from the default. ``` @@ -6939,24 +7028,24 @@ rclone serve restic remote:path [flags] ## Options ``` - --addr string IPaddress:Port or :Port to bind server to (default "localhost:8080") + --addr stringArray IPaddress:Port or :Port to bind server to (default [127.0.0.1:8080]) --append-only Disallow deletion of repository data --baseurl string Prefix for URLs - leave blank for root --cache-objects Cache listed objects (default true) - --cert string SSL PEM key (concatenation of certificate and CA certificate) + --cert string TLS PEM key (concatenation of certificate and CA certificate) --client-ca string Client certificate authority to verify clients with -h, --help help for restic - --htpasswd string htpasswd file - if not provided no authentication is done - --key string SSL PEM Private key + --htpasswd string A htpasswd file - if not provided no authentication is done + --key string TLS PEM Private key --max-header-bytes int Maximum size of request header (default 4096) --min-tls-version string Minimum TLS version that is acceptable (default "tls1.0") --pass string Password for authentication --private-repos Users can only access their private repo - --realm string Realm for authentication (default "rclone") - --server-read-timeout duration Timeout for server reading data (default 1h0m0s) - --server-write-timeout duration Timeout for 
server writing data (default 1h0m0s) + --realm string Realm for authentication + --salt string Password hashing salt (default "dlPL2MqE") + --server-read-timeout Duration Timeout for server reading data (default 1h0m0s) + --server-write-timeout Duration Timeout for server writing data (default 1h0m0s) --stdio Run an HTTP2 server on stdin/stdout - --template string User-specified template --user string User name for authentication ``` @@ -7435,7 +7524,7 @@ rclone serve sftp remote:path [flags] --addr string IPaddress:Port or :Port to bind server to (default "localhost:2022") --auth-proxy string A program to use to create the backend from the auth --authorized-keys string Authorized keys file (default "~/.ssh/authorized_keys") - --dir-cache-time duration Time to cache directory entries for (default 5m0s) + --dir-cache-time Duration Time to cache directory entries for (default 5m0s) --dir-perms FileMode Directory permissions (default 0777) --file-perms FileMode File permissions (default 0666) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) @@ -7446,26 +7535,26 @@ rclone serve sftp remote:path [flags] --no-modtime Don't read/write the modification time (can speed things up) --no-seek Don't allow seeking in files --pass string Password for authentication - --poll-interval duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) + --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --read-only Only allow read-only access --stdio Run an sftp server on stdin/stdout --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) --user string User name for authentication - 
--vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s) + --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off) - --vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s) + --vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s) --vfs-case-insensitive If a file name not found, find a case insensitive match --vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off) --vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) - --vfs-read-wait duration Time to wait for in-sequence read before seeking (default 20ms) + --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-used-is-size rclone size Use the rclone size algorithm for Used size - --vfs-write-back duration Time to writeback files after last use when using cache (default 5s) - --vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s) + --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s) + --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) ``` See the [global flags page](https://rclone.org/flags/) for global options not listed here. @@ -7500,13 +7589,19 @@ to see the full list. 
## Server options Use `--addr` to specify which IP address and port the server should -listen on, e.g. `--addr 1.2.3.4:8000` or `--addr :8080` to -listen to all IPs. By default it only listens on localhost. You can use port +listen on, eg `--addr 1.2.3.4:8000` or `--addr :8080` to listen to all +IPs. By default it only listens on localhost. You can use port :0 to let the OS choose an available port. If you set `--addr` to listen on a public or LAN accessible IP address then using Authentication is advised - see the next section for info. +You can use a unix socket by setting the url to `unix:///path/to/socket` +or just by using an absolute path name. Note that unix sockets bypass the +authentication - this is expected to be done with file system permissions. + +`--addr` may be repeated to listen on multiple IPs/ports/sockets. + `--server-read-timeout` and `--server-write-timeout` can be used to control the timeouts on the server. Note that this is the total time for a transfer. @@ -7522,6 +7617,24 @@ inserts leading and trailing "/" on `--baseurl`, so `--baseurl "rclone"`, `--baseurl "/rclone"` and `--baseurl "/rclone/"` are all treated identically. +### TLS (SSL) + +By default this will serve over http. If you want you can serve over +https. You will need to supply the `--cert` and `--key` flags. +If you wish to do client side certificate validation then you will need to +supply `--client-ca` also. + +`--cert` should be a either a PEM encoded certificate or a concatenation +of that with the CA certificate. `--key` should be the PEM encoded +private key and `--client-ca` should be the PEM encoded client +certificate authority certificate. + +--min-tls-version is minimum TLS version that is acceptable. Valid + values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default + "tls1.0"). + +### Template + `--template` allows a user to specify a custom markup template for HTTP and WebDAV serve functions. 
The server exports the following markup to be used within the template to server pages: @@ -7566,21 +7679,7 @@ The password file can be updated while rclone is running. Use `--realm` to set the authentication realm. -### SSL/TLS - -By default this will serve over HTTP. If you want you can serve over -HTTPS. You will need to supply the `--cert` and `--key` flags. -If you wish to do client side certificate validation then you will need to -supply `--client-ca` also. - -`--cert` should be either a PEM encoded certificate or a concatenation -of that with the CA certificate. `--key` should be the PEM encoded -private key and `--client-ca` should be the PEM encoded client -certificate authority certificate. - ---min-tls-version is minimum TLS version that is acceptable. Valid - values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default - "tls1.0"). +Use `--salt` to change the password hashing salt from the default. ## VFS - Virtual File System @@ -7989,49 +8088,50 @@ rclone serve webdav remote:path [flags] ## Options ``` - --addr string IPaddress:Port or :Port to bind server to (default "localhost:8080") + --addr stringArray IPaddress:Port or :Port to bind server to (default [127.0.0.1:8080]) --auth-proxy string A program to use to create the backend from the auth --baseurl string Prefix for URLs - leave blank for root - --cert string SSL PEM key (concatenation of certificate and CA certificate) + --cert string TLS PEM key (concatenation of certificate and CA certificate) --client-ca string Client certificate authority to verify clients with - --dir-cache-time duration Time to cache directory entries for (default 5m0s) + --dir-cache-time Duration Time to cache directory entries for (default 5m0s) --dir-perms FileMode Directory permissions (default 0777) --disable-dir-list Disable HTML directory list on GET request for a directory --etag-hash string Which hash to use for the ETag, or auto or blank for off --file-perms FileMode File permissions (default 0666) --gid 
uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) -h, --help help for webdav - --htpasswd string htpasswd file - if not provided no authentication is done - --key string SSL PEM Private key + --htpasswd string A htpasswd file - if not provided no authentication is done + --key string TLS PEM Private key --max-header-bytes int Maximum size of request header (default 4096) --min-tls-version string Minimum TLS version that is acceptable (default "tls1.0") --no-checksum Don't compare checksums on up/download --no-modtime Don't read/write the modification time (can speed things up) --no-seek Don't allow seeking in files --pass string Password for authentication - --poll-interval duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) + --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --read-only Only allow read-only access - --realm string Realm for authentication (default "rclone") - --server-read-timeout duration Timeout for server reading data (default 1h0m0s) - --server-write-timeout duration Timeout for server writing data (default 1h0m0s) + --realm string Realm for authentication + --salt string Password hashing salt (default "dlPL2MqE") + --server-read-timeout Duration Timeout for server reading data (default 1h0m0s) + --server-write-timeout Duration Timeout for server writing data (default 1h0m0s) --template string User-specified template --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) --user string User name for authentication - --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s) + --vfs-cache-max-age Duration Max age of objects in 
the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off) - --vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s) + --vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s) --vfs-case-insensitive If a file name not found, find a case insensitive match --vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off) --vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) - --vfs-read-wait duration Time to wait for in-sequence read before seeking (default 20ms) + --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-used-is-size rclone size Use the rclone size algorithm for Used size - --vfs-write-back duration Time to writeback files after last use when using cache (default 5s) - --vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s) + --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s) + --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) ``` See the [global flags page](https://rclone.org/flags/) for global options not listed here. 
@@ -8134,7 +8234,7 @@ rclone test changenotify remote: [flags] ``` -h, --help help for changenotify - --poll-interval duration Time to wait between polling for changes (default 10s) + --poll-interval Duration Time to wait between polling for changes (default 10s) ``` See the [global flags page](https://rclone.org/flags/) for global options not listed here. @@ -8199,7 +8299,7 @@ rclone test info [remote:path]+ [flags] --check-normalization Check UTF-8 Normalization --check-streaming Check uploads with indeterminate file size -h, --help help for info - --upload-wait duration Wait after writing a file + --upload-wait Duration Wait after writing a file (default 0s) --write-json string Write results to file ``` @@ -8251,6 +8351,7 @@ rclone test makefiles [flags] --files int Number of files to create (default 1000) --files-per-directory int Average number of files per directory (default 10) -h, --help help for makefiles + --max-depth int Maximum depth of directory hierarchy (default 10) --max-file-size SizeSuffix Maximum size of files to create (default 100) --max-name-length int Maximum size of file names (default 12) --min-file-size SizeSuffix Minimum size of file to create @@ -8378,7 +8479,6 @@ rclone tree remote:path [flags] ``` -a, --all All files are listed (list . files too) - -C, --color Turn colorization on always -d, --dirs-only List directories only --dirsfirst List directories before files (-U disables) --full-path Print the full path prefix for each file @@ -8602,8 +8702,17 @@ Will get their own names ### Valid remote names Remote names are case sensitive, and must adhere to the following rules: - - May only contain `0`-`9`, `A`-`Z`, `a`-`z`, `_`, `-`, `.` and space. + - May contain number, letter, `_`, `-`, `.` and space. - May not start with `-` or space. + - May not end with space. + +Starting with rclone version 1.61, any Unicode numbers and letters are allowed, +while in older versions it was limited to plain ASCII (0-9, A-Z, a-z). 
If you use +the same rclone configuration from different shells, which may be configured with +different character encoding, you must be cautious to use characters that are +possible to write in all of them. This is mostly a problem on Windows, where +the console traditionally uses a non-Unicode character set - defined +by the so-called "code page". Quoting and the shell --------------------- @@ -9087,6 +9196,16 @@ quicker than without the `--checksum` flag. When using this flag, rclone won't update mtimes of remote files if they are incorrect as it would normally. +### --color WHEN ### + +Specify when colors (and other ANSI codes) should be added to the output. + +`AUTO` (default) only allows ANSI codes when the output is a terminal + +`NEVER` never allow ANSI codes + +`ALWAYS` always add ANSI codes, regardless of the output format (terminal or file) + ### --compare-dest=DIR ### When using `sync`, `copy` or `move` DIR is checked in addition to the @@ -10600,6 +10719,12 @@ For the filtering options * `--min-age` * `--max-age` * `--dump filters` + * `--metadata-include` + * `--metadata-include-from` + * `--metadata-exclude` + * `--metadata-exclude-from` + * `--metadata-filter` + * `--metadata-filter-from` See the [filtering section](https://rclone.org/filtering/). @@ -10805,15 +10930,16 @@ two ways of doing it, described below. ## Configuring using rclone authorize ## -On the headless box run `rclone` config but answer `N` to the `Use -auto config?` question. +On the headless box run `rclone` config but answer `N` to the `Use web browser +to automatically authenticate?` question. ``` ... Remote config -Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine +Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access +If not sure try Y. If Y failed, try N.
y) Yes (default) n) No y/n> n @@ -10889,15 +11015,16 @@ Linux and MacOS users can utilize SSH Tunnel to redirect the headless box port 5 ``` ssh -L localhost:53682:localhost:53682 username@remote_server ``` -Then on the headless box run `rclone` config and answer `Y` to the `Use -auto config?` question. +Then on the headless box run `rclone` config and answer `Y` to the `Use web +browser to automatically authenticate?` question. ``` ... Remote config -Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine +Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access +If not sure try Y. If Y failed, try N. y) Yes (default) n) No y/n> y @@ -10932,7 +11059,7 @@ you expect. Instead use a `--filter...` flag. ## Patterns for matching path/file names -### Pattern syntax +### Pattern syntax {#patterns} Here is a formal definition of the pattern syntax, [examples](#examples) are below. @@ -11094,7 +11221,7 @@ them into regular expressions. | Rooted Regexp | `/{{.*\.jpe?g}}` | `/file.jpeg` | `/file.png` | | | | `/file.jpg` | `/dir/file.jpg` | -## How filter rules are applied to files +## How filter rules are applied to files {#how-filter-rules-work} Rclone path/file name filters are made up of one or more of the following flags: @@ -11657,6 +11784,43 @@ E.g. for the following directory structure: The command `rclone ls --exclude-if-present .ignore dir1` does not list `dir3`, `file3` or `.ignore`. +## Metadata filters {#metadata} + +The metadata filters work in a very similar way to the normal file +name filters, except they match [metadata](https://rclone.org/docs/#metadata) on the +object. + +The metadata should be specified as `key=value` patterns. This may be +wildcarded using the normal [filter patterns](#patterns) or [regular +expressions](#regexp). 
+ +For example if you wished to list only local files with a mode of +`100664` you could do that with: + + rclone lsf -M --files-only --metadata-include "mode=100664" . + +Or if you wished to show files with an `atime`, `mtime` or `btime` at a given date: + + rclone lsf -M --files-only --metadata-include "[abm]time=2022-12-16*" . + +Like file filtering, metadata filtering only applies to files not to +directories. + +The filters can be applied using these flags. + +- `--metadata-include` - Include metadatas matching pattern +- `--metadata-include-from` - Read metadata include patterns from file (use - to read from stdin) +- `--metadata-exclude` - Exclude metadatas matching pattern +- `--metadata-exclude-from` - Read metadata exclude patterns from file (use - to read from stdin) +- `--metadata-filter` - Add a metadata filtering rule +- `--metadata-filter-from` - Read metadata filtering patterns from a file (use - to read from stdin) + +Each flag can be repeated. See the section on [how filter rules are +applied](#how-filter-rules-work) for more details - these flags work +in an identical way to the file name filtering flags, but instead of +file name patterns have metadata patterns. + + ## Common pitfalls The most frequent filter support issues on @@ -12382,6 +12546,14 @@ See the [config providers](https://rclone.org/commands/rclone_config_providers/) **Authentication is required for this call.** +### config/setpath: Set the path of the config file {#config-setpath} + +Parameters: + +- path - path to the config file to use + +**Authentication is required for this call.** + ### config/update: update the config for a remote. {#config-update} This takes the following parameters: @@ -12480,7 +12652,7 @@ Returns: "result": "" } -OR +OR { "error": true, "result": "" @@ -12675,6 +12847,22 @@ Parameters: - rate - int +### debug/set-gc-percent: Call runtime/debug.SetGCPercent for setting the garbage collection target percentage. 
{#debug-set-gc-percent} + +SetGCPercent sets the garbage collection target percentage: a collection is triggered +when the ratio of freshly allocated data to live data remaining after the previous collection +reaches this percentage. SetGCPercent returns the previous setting. The initial setting is the +value of the GOGC environment variable at startup, or 100 if the variable is not set. + +This setting may be effectively reduced in order to maintain a memory limit. +A negative percentage effectively disables garbage collection, unless the memory limit is reached. + +See https://pkg.go.dev/runtime/debug#SetMemoryLimit for more details. + +Parameters: + +- gc-percent - int + ### debug/set-mutex-profile-fraction: Set runtime.SetMutexProfileFraction for mutex profiling. {#debug-set-mutex-profile-fraction} SetMutexProfileFraction controls the fraction of mutex contention @@ -12696,6 +12884,38 @@ Results: - previousRate - int +### debug/set-soft-memory-limit: Call runtime/debug.SetMemoryLimit for setting a soft memory limit for the runtime. {#debug-set-soft-memory-limit} + +SetMemoryLimit provides the runtime with a soft memory limit. + +The runtime undertakes several processes to try to respect this memory limit, including +adjustments to the frequency of garbage collections and returning memory to the underlying +system more aggressively. This limit will be respected even if GOGC=off (or, if SetGCPercent(-1) is executed). + +The input limit is provided as bytes, and includes all memory mapped, managed, and not +released by the Go runtime. Notably, it does not account for space used by the Go binary +and memory external to Go, such as memory managed by the underlying system on behalf of +the process, or memory managed by non-Go code inside the same process. +Examples of excluded memory sources include: OS kernel memory held on behalf of the process, +memory allocated by C code, and memory mapped by syscall.Mmap (because it is not managed by the Go runtime). 
+ +A zero limit or a limit that's lower than the amount of memory used by the Go runtime may cause +the garbage collector to run nearly continuously. However, the application may still make progress. + +The memory limit is always respected by the Go runtime, so to effectively disable this behavior, +set the limit very high. math.MaxInt64 is the canonical value for disabling the limit, but values +much greater than the available memory on the underlying system work just as well. + +See https://go.dev/doc/gc-guide for a detailed guide explaining the soft memory limit in more detail, +as well as a variety of common use-cases and scenarios. + +SetMemoryLimit returns the previously set memory limit. A negative input does not adjust the limit, +and allows for retrieval of the currently set memory limit. + +Parameters: + +- mem-limit - int + ### fscache/clear: Clear the Fs cache. {#fscache-clear} This clears the fs cache. This is where remotes created from backends @@ -14241,7 +14461,7 @@ upon backend-specific capabilities. | Microsoft OneDrive | Yes | Yes | Yes | Yes | Yes | No | No | Yes | Yes | Yes | | OpenDrive | Yes | Yes | Yes | Yes | No | No | No | No | No | Yes | | OpenStack Swift | Yes † | Yes | No | No | No | Yes | Yes | No | Yes | No | -| Oracle Object Storage | Yes | Yes | No | No | Yes | Yes | No | No | No | No | +| Oracle Object Storage | No | Yes | No | No | Yes | Yes | Yes | No | No | No | | pCloud | Yes | Yes | Yes | Yes | Yes | No | No | Yes | Yes | Yes | | premiumize.me | Yes | No | Yes | Yes | No | No | No | Yes | Yes | Yes | | put.io | Yes | No | Yes | Yes | Yes | No | Yes | No | Yes | Yes | @@ -14251,7 +14471,7 @@ upon backend-specific capabilities. 
| Sia | No | No | No | No | No | No | Yes | No | No | Yes | | SMB | No | No | Yes | Yes | No | No | Yes | No | No | Yes | | SugarSync | Yes | Yes | Yes | Yes | No | No | Yes | Yes | No | Yes | -| Storj | Yes † | No | Yes | No | No | Yes | Yes | No | No | No | +| Storj | Yes † | Yes | Yes | No | No | Yes | Yes | No | No | No | | Uptobox | No | Yes | Yes | Yes | No | No | No | No | No | No | | WebDAV | Yes | Yes | Yes | Yes | No | No | Yes ‡ | No | Yes | Yes | | Yandex Disk | Yes | Yes | Yes | Yes | Yes | No | Yes | Yes | Yes | Yes | @@ -14366,9 +14586,10 @@ These flags are available for every command. -c, --checksum Skip based on checksum (if available) & size, not mod-time & size --client-cert string Client SSL certificate (PEM) for mutual TLS auth --client-key string Client SSL private key (PEM) for mutual TLS auth + --color string When to show colors (and other ANSI codes) AUTO|NEVER|ALWAYS (default "AUTO") --compare-dest stringArray Include additional comma separated server-side paths during comparison --config string Config file (default "$HOME/.config/rclone/rclone.conf") - --contimeout duration Connect timeout (default 1m0s) + --contimeout Duration Connect timeout (default 1m0s) --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination --cpuprofile string Write cpu profile to file --cutoff-mode string Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default "HARD") @@ -14386,16 +14607,16 @@ These flags are available for every command. 
--dump-headers Dump HTTP headers - may contain sensitive info --error-on-no-transfer Sets exit code 9 if no files are transferred, useful in scripts --exclude stringArray Exclude files matching pattern - --exclude-from stringArray Read exclude patterns from file (use - to read from stdin) + --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin) --exclude-if-present stringArray Exclude directories if filename is present - --expect-continue-timeout duration Timeout when using expect / 100-continue in HTTP (default 1s) + --expect-continue-timeout Duration Timeout when using expect / 100-continue in HTTP (default 1s) --fast-list Use recursive list if available; uses more memory but fewer transactions --files-from stringArray Read list of source-file names from file (use - to read from stdin) --files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin) - -f, --filter stringArray Add a file-filtering rule - --filter-from stringArray Read filtering patterns from a file (use - to read from stdin) - --fs-cache-expire-duration duration Cache remotes for this long (0 to disable caching) (default 5m0s) - --fs-cache-expire-interval duration Interval to check for expired remotes (default 1m0s) + -f, --filter stringArray Add a file filtering rule + --filter-from stringArray Read file filtering patterns from a file (use - to read from stdin) + --fs-cache-expire-duration Duration Cache remotes for this long (0 to disable caching) (default 5m0s) + --fs-cache-expire-interval Duration Interval to check for expired remotes (default 1m0s) --header stringArray Set HTTP header for all transactions --header-download stringArray Set HTTP header for download transactions --header-upload stringArray Set HTTP header for upload transactions @@ -14409,9 +14630,9 @@ These flags are available for every command. 
-I, --ignore-times Don't skip files that match size and time - transfer all files --immutable Do not modify files, fail if existing files have been modified --include stringArray Include files matching pattern - --include-from stringArray Read include patterns from file (use - to read from stdin) + --include-from stringArray Read file include patterns from file (use - to read from stdin) -i, --interactive Enable interactive mode - --kv-lock-time duration Maximum time to keep key-value database locked by process (default 1s) + --kv-lock-time Duration Maximum time to keep key-value database locked by process (default 1s) --log-file string Log everything to this file --log-format string Comma separated list of log format options (default "date,time") --log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE") @@ -14421,16 +14642,22 @@ These flags are available for every command. --max-backlog int Maximum number of objects in sync or check backlog (default 10000) --max-delete int When synchronizing, limit the number of deletes (default -1) --max-depth int If set limits the recursion depth to this (default -1) - --max-duration duration Maximum duration rclone will transfer data for + --max-duration Duration Maximum duration rclone will transfer data for (default 0s) --max-size SizeSuffix Only transfer files smaller than this in KiB or suffix B|K|M|G|T|P (default off) --max-stats-groups int Maximum number of stats groups to keep in memory, on max oldest is discarded (default 1000) --max-transfer SizeSuffix Maximum size of data to transfer (default off) --memprofile string Write memory profile to file -M, --metadata If set, preserve metadata when copying objects + --metadata-exclude stringArray Exclude metadatas matching pattern + --metadata-exclude-from stringArray Read metadata exclude patterns from file (use - to read from stdin) + --metadata-filter stringArray Add a metadata filtering rule + --metadata-filter-from stringArray Read metadata filtering 
patterns from a file (use - to read from stdin) + --metadata-include stringArray Include metadatas matching pattern + --metadata-include-from stringArray Read metadata include patterns from file (use - to read from stdin) --metadata-set stringArray Add metadata key=value when uploading --min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off) --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) - --modify-window duration Max time diff to be considered the same (default 1ns) + --modify-window Duration Max time diff to be considered the same (default 1ns) --multi-thread-cutoff SizeSuffix Use multi-thread downloads for files above this size (default 250Mi) --multi-thread-streams int Max number of streams to use for multi-thread downloads (default 4) --no-check-certificate Do not verify the server SSL certificate (insecure) @@ -14446,25 +14673,26 @@ These flags are available for every command. --progress-terminal-title Show progress on the terminal title (requires -P/--progress) -q, --quiet Print as little stuff as possible --rc Enable the remote control server - --rc-addr string IPaddress:Port or :Port to bind server to (default "localhost:5572") + --rc-addr stringArray IPaddress:Port or :Port to bind server to (default [localhost:5572]) --rc-allow-origin string Set the allowed origin for CORS --rc-baseurl string Prefix for URLs - leave blank for root - --rc-cert string SSL PEM key (concatenation of certificate and CA certificate) + --rc-cert string TLS PEM key (concatenation of certificate and CA certificate) --rc-client-ca string Client certificate authority to verify clients with --rc-enable-metrics Enable prometheus metrics on /metrics --rc-files string Path to local files to serve on the HTTP server - --rc-htpasswd string htpasswd file - if not provided no authentication is done - --rc-job-expire-duration duration Expire finished async jobs older than this value (default 
1m0s) - --rc-job-expire-interval duration Interval to check for expired async jobs (default 10s) - --rc-key string SSL PEM Private key + --rc-htpasswd string A htpasswd file - if not provided no authentication is done + --rc-job-expire-duration Duration Expire finished async jobs older than this value (default 1m0s) + --rc-job-expire-interval Duration Interval to check for expired async jobs (default 10s) + --rc-key string TLS PEM Private key --rc-max-header-bytes int Maximum size of request header (default 4096) --rc-min-tls-version string Minimum TLS version that is acceptable (default "tls1.0") --rc-no-auth Don't require auth for certain methods --rc-pass string Password for authentication - --rc-realm string Realm for authentication (default "rclone") + --rc-realm string Realm for authentication + --rc-salt string Password hashing salt (default "dlPL2MqE") --rc-serve Enable the serving of remote objects - --rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s) - --rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s) + --rc-server-read-timeout Duration Timeout for server reading data (default 1h0m0s) + --rc-server-write-timeout Duration Timeout for server writing data (default 1h0m0s) --rc-template string User-specified template --rc-user string User name for authentication --rc-web-fetch-url string URL to fetch the releases for webgui (default "https://api.github.com/repos/rclone/rclone-webui-react/releases/latest") @@ -14474,10 +14702,10 @@ These flags are available for every command. --rc-web-gui-update Check and update to latest version of web gui --refresh-times Refresh the modtime of remote files --retries int Retry operations this many times if they fail (default 3) - --retries-sleep duration Interval between retrying operations if they fail, e.g. 500ms, 60s, 5m (0 to disable) + --retries-sleep Duration Interval between retrying operations if they fail, e.g. 
500ms, 60s, 5m (0 to disable) (default 0s) --server-side-across-configs Allow server-side operations (e.g. copy) to work across different configs --size-only Skip based on size only, not mod-time or checksum - --stats duration Interval between printing stats, e.g. 500ms, 60s, 5m (0 to disable) (default 1m0s) + --stats Duration Interval between printing stats, e.g. 500ms, 60s, 5m (0 to disable) (default 1m0s) --stats-file-name-length int Max file name length in stats (0 for no limit) (default 45) --stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO") --stats-one-line Make the stats fit on one line @@ -14490,7 +14718,7 @@ These flags are available for every command. --syslog Use Syslog for logging --syslog-facility string Facility for syslog, e.g. KERN,USER,... (default "DAEMON") --temp-dir string Directory rclone will use for temporary files (default "/tmp") - --timeout duration IO idle timeout (default 5m0s) + --timeout Duration IO idle timeout (default 5m0s) --tpslimit float Limit HTTP transactions per second to this --tpslimit-burst int Max burst of transactions for --tpslimit (default 1) --track-renames When synchronizing, track file renames and do a server-side move if possible @@ -14501,7 +14729,7 @@ These flags are available for every command. --use-json-log Use json log format --use-mmap Use mmap allocator (see docs) --use-server-modtime Use server modified time instead of object metadata - --user-agent string Set the user-agent to a specified string (default "rclone/v1.60.0") + --user-agent string Set the user-agent to a specified string (default "rclone/v1.61.0") -v, --verbose count Print lots more stuff (repeat for more) ``` @@ -14511,529 +14739,543 @@ These flags are available for every command. They control the backends and may be set in the config file. 
``` - --acd-auth-url string Auth server URL - --acd-client-id string OAuth Client Id - --acd-client-secret string OAuth Client Secret - --acd-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot) - --acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink (default 9Gi) - --acd-token string OAuth Access Token as a JSON blob - --acd-token-url string Token server url - --acd-upload-wait-per-gb Duration Additional time per GiB to wait after a failed complete upload to see if it appears (default 3m0s) - --alias-remote string Remote or path to alias - --azureblob-access-tier string Access tier of blob: hot, cool or archive - --azureblob-account string Storage Account Name - --azureblob-archive-tier-delete Delete archive tier blobs before overwriting - --azureblob-chunk-size SizeSuffix Upload chunk size (default 4Mi) - --azureblob-disable-checksum Don't store MD5 checksum with object metadata - --azureblob-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8) - --azureblob-endpoint string Endpoint for the service - --azureblob-key string Storage Account Key - --azureblob-list-chunk int Size of blob list (default 5000) - --azureblob-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s) - --azureblob-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool - --azureblob-msi-client-id string Object ID of the user-assigned MSI to use, if any - --azureblob-msi-mi-res-id string Azure resource ID of the user-assigned MSI to use, if any - --azureblob-msi-object-id string Object ID of the user-assigned MSI to use, if any - --azureblob-no-head-object If set, do not do HEAD before GET when getting objects - --azureblob-public-access string Public access level of a container: blob or container - --azureblob-sas-url string SAS URL for container level access only - --azureblob-service-principal-file string 
Path to file containing credentials for use with a service principal - --azureblob-upload-concurrency int Concurrency for multipart uploads (default 16) - --azureblob-upload-cutoff string Cutoff for switching to chunked upload (<= 256 MiB) (deprecated) - --azureblob-use-emulator Uses local storage emulator if provided as 'true' - --azureblob-use-msi Use a managed service identity to authenticate (only works in Azure) - --b2-account string Account ID or Application Key ID - --b2-chunk-size SizeSuffix Upload chunk size (default 96Mi) - --b2-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4Gi) - --b2-disable-checksum Disable checksums for large (> upload cutoff) files - --b2-download-auth-duration Duration Time before the authorization token will expire in s or suffix ms|s|m|h|d (default 1w) - --b2-download-url string Custom endpoint for downloads - --b2-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot) - --b2-endpoint string Endpoint for the service - --b2-hard-delete Permanently delete files on remote removal, otherwise hide files - --b2-key string Application Key - --b2-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s) - --b2-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool - --b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging - --b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi) - --b2-version-at Time Show file versions as they were at the specified time (default off) - --b2-versions Include old versions in directory listings - --box-access-token string Box App Primary Access Token - --box-auth-url string Auth server URL - --box-box-config-file string Box App config.json location - --box-box-sub-type string (default "user") - --box-client-id string OAuth Client Id - --box-client-secret string OAuth Client Secret - --box-commit-retries int Max number of times to 
try committing a multipart file (default 100) - --box-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot) - --box-list-chunk int Size of listing chunk 1-1000 (default 1000) - --box-owned-by string Only show items owned by the login (email address) passed in - --box-root-folder-id string Fill in for rclone to use a non root folder as its starting point - --box-token string OAuth Access Token as a JSON blob - --box-token-url string Token server url - --box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50 MiB) (default 50Mi) - --cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage (default 1m0s) - --cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming - --cache-chunk-path string Directory to cache chunk files (default "$HOME/.cache/rclone/cache-backend") - --cache-chunk-size SizeSuffix The size of a chunk (partial file data) (default 5Mi) - --cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk (default 10Gi) - --cache-db-path string Directory to store file structure metadata DB (default "$HOME/.cache/rclone/cache-backend") - --cache-db-purge Clear all the cached data for this remote on start - --cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s) - --cache-info-age Duration How long to cache file structure information (directory listings, file size, times, etc.) 
(default 6h0m0s) - --cache-plex-insecure string Skip all certificate verification when connecting to the Plex server - --cache-plex-password string The password of the Plex user (obscured) - --cache-plex-url string The URL of the Plex server - --cache-plex-username string The username of the Plex user - --cache-read-retries int How many times to retry a read from a cache storage (default 10) - --cache-remote string Remote to cache - --cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1) - --cache-tmp-upload-path string Directory to keep temporary files until they are uploaded - --cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s) - --cache-workers int How many workers should run in parallel to download chunks (default 4) - --cache-writes Cache file data on writes through the FS - --chunker-chunk-size SizeSuffix Files larger than chunk size will be split in chunks (default 2Gi) - --chunker-fail-hard Choose how chunker should handle files with missing or invalid chunks - --chunker-hash-type string Choose how chunker handles hash sums (default "md5") - --chunker-remote string Remote to chunk/unchunk - --combine-upstreams SpaceSepList Upstreams for combining - --compress-level int GZIP compression level (-2 to 9) (default -1) - --compress-mode string Compression mode (default "gzip") - --compress-ram-cache-limit SizeSuffix Some remotes don't allow the upload of files with unknown size (default 20Mi) - --compress-remote string Remote to compress - -L, --copy-links Follow symlinks and copy the pointed to item - --crypt-directory-name-encryption Option to either encrypt directory names or leave them intact (default true) - --crypt-filename-encoding string How to encode the encrypted filename to text string (default "base32") - --crypt-filename-encryption string How to encrypt the filenames (default "standard") - --crypt-no-data-encryption Option to either encrypt 
file data or leave it unencrypted - --crypt-password string Password or pass phrase for encryption (obscured) - --crypt-password2 string Password or pass phrase for salt (obscured) - --crypt-remote string Remote to encrypt/decrypt - --crypt-server-side-across-configs Allow server-side operations (e.g. copy) to work across different crypt configs - --crypt-show-mapping For all files listed show how the names encrypt - --drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded - --drive-allow-import-name-change Allow the filetype to change when uploading Google docs - --drive-auth-owner-only Only consider files owned by the authenticated user - --drive-auth-url string Auth server URL - --drive-chunk-size SizeSuffix Upload chunk size (default 8Mi) - --drive-client-id string Google Application Client Id - --drive-client-secret string OAuth Client Secret - --drive-copy-shortcut-content Server side copy contents of shortcuts instead of the shortcut - --drive-disable-http2 Disable drive using http2 (default true) - --drive-encoding MultiEncoder The encoding for the backend (default InvalidUtf8) - --drive-export-formats string Comma separated list of preferred formats for downloading Google docs (default "docx,xlsx,pptx,svg") - --drive-formats string Deprecated: See export_formats - --drive-impersonate string Impersonate this user when using a service account - --drive-import-formats string Comma separated list of preferred formats for uploading Google docs - --drive-keep-revision-forever Keep new head revision of each file forever - --drive-list-chunk int Size of listing chunk 100-1000, 0 to disable (default 1000) - --drive-pacer-burst int Number of API calls to allow without sleeping (default 100) - --drive-pacer-min-sleep Duration Minimum time to sleep between API calls (default 100ms) - --drive-resource-key string Resource key for accessing a link-shared file - --drive-root-folder-id string ID of the root folder - --drive-scope 
string Scope that rclone should use when requesting access from drive - --drive-server-side-across-configs Allow server-side operations (e.g. copy) to work across different drive configs - --drive-service-account-credentials string Service Account Credentials JSON blob - --drive-service-account-file string Service Account Credentials JSON file path - --drive-shared-with-me Only show files that are shared with me - --drive-size-as-quota Show sizes as storage quota usage, not actual size - --drive-skip-checksum-gphotos Skip MD5 checksum on Google photos and videos only - --drive-skip-dangling-shortcuts If set skip dangling shortcut files - --drive-skip-gdocs Skip google documents in all listings - --drive-skip-shortcuts If set skip shortcut files - --drive-starred-only Only show files that are starred - --drive-stop-on-download-limit Make download limit errors be fatal - --drive-stop-on-upload-limit Make upload limit errors be fatal - --drive-team-drive string ID of the Shared Drive (Team Drive) - --drive-token string OAuth Access Token as a JSON blob - --drive-token-url string Token server url - --drive-trashed-only Only show files that are in the trash - --drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8Mi) - --drive-use-created-date Use file created date instead of modified date - --drive-use-shared-date Use date file was shared instead of modified date - --drive-use-trash Send files to the trash instead of deleting permanently (default true) - --drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download (default off) - --dropbox-auth-url string Auth server URL - --dropbox-batch-commit-timeout Duration Max time to wait for a batch to finish committing (default 10m0s) - --dropbox-batch-mode string Upload file batching sync|async|off (default "sync") - --dropbox-batch-size int Max number of files in upload batch - --dropbox-batch-timeout Duration Max time to allow an idle upload batch before uploading 
(default 0s) - --dropbox-chunk-size SizeSuffix Upload chunk size (< 150Mi) (default 48Mi) - --dropbox-client-id string OAuth Client Id - --dropbox-client-secret string OAuth Client Secret - --dropbox-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot) - --dropbox-impersonate string Impersonate this user when using a business account - --dropbox-shared-files Instructs rclone to work on individual shared files - --dropbox-shared-folders Instructs rclone to work on shared folders - --dropbox-token string OAuth Access Token as a JSON blob - --dropbox-token-url string Token server url - --fichier-api-key string Your API Key, get it from https://1fichier.com/console/params.pl - --fichier-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot) - --fichier-file-password string If you want to download a shared file that is password protected, add this parameter (obscured) - --fichier-folder-password string If you want to list the files in a shared folder that is password protected, add this parameter (obscured) - --fichier-shared-folder string If you want to download a shared folder, add this parameter - --filefabric-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot) - --filefabric-permanent-token string Permanent Authentication Token - --filefabric-root-folder-id string ID of the root folder - --filefabric-token string Session Token - --filefabric-token-expiry string Token expiry time - --filefabric-url string URL of the Enterprise File Fabric to connect to - --filefabric-version string Version read from the file fabric - --ftp-ask-password Allow asking for FTP password when needed - --ftp-close-timeout Duration Maximum time to wait for a response to close (default 1m0s) - --ftp-concurrency int Maximum number of FTP simultaneous connections, 0 for unlimited - 
--ftp-disable-epsv Disable using EPSV even if server advertises support - --ftp-disable-mlsd Disable using MLSD even if server advertises support - --ftp-disable-tls13 Disable TLS 1.3 (workaround for FTP servers with buggy TLS) - --ftp-disable-utf8 Disable using UTF-8 even if server advertises support - --ftp-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,RightSpace,Dot) - --ftp-explicit-tls Use Explicit FTPS (FTP over TLS) - --ftp-force-list-hidden Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD - --ftp-host string FTP host to connect to - --ftp-idle-timeout Duration Max time before closing idle connections (default 1m0s) - --ftp-no-check-certificate Do not verify the TLS certificate of the server - --ftp-pass string FTP password (obscured) - --ftp-port int FTP port number (default 21) - --ftp-shut-timeout Duration Maximum time to wait for data connection closing status (default 1m0s) - --ftp-tls Use Implicit FTPS (FTP over TLS) - --ftp-tls-cache-size int Size of TLS session cache for all control and data connections (default 32) - --ftp-user string FTP username (default "$USER") - --ftp-writing-mdtm Use MDTM to set modification time (VsFtpd quirk) - --gcs-anonymous Access public buckets and objects without credentials - --gcs-auth-url string Auth server URL - --gcs-bucket-acl string Access Control List for new buckets - --gcs-bucket-policy-only Access checks should use bucket-level IAM policies - --gcs-client-id string OAuth Client Id - --gcs-client-secret string OAuth Client Secret - --gcs-decompress If set this will decompress gzip encoded objects - --gcs-encoding MultiEncoder The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot) - --gcs-endpoint string Endpoint for the service - --gcs-location string Location for the newly created buckets - --gcs-no-check-bucket If set, don't attempt to check the bucket exists or create it - --gcs-object-acl string Access Control List for new 
objects - --gcs-project-number string Project number - --gcs-service-account-file string Service Account Credentials JSON file path - --gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage - --gcs-token string OAuth Access Token as a JSON blob - --gcs-token-url string Token server url - --gphotos-auth-url string Auth server URL - --gphotos-client-id string OAuth Client Id - --gphotos-client-secret string OAuth Client Secret - --gphotos-encoding MultiEncoder The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot) - --gphotos-include-archived Also view and download archived media - --gphotos-read-only Set to make the Google Photos backend read only - --gphotos-read-size Set to read the size of media items - --gphotos-start-year int Year limits the photos to be downloaded to those which are uploaded after the given year (default 2000) - --gphotos-token string OAuth Access Token as a JSON blob - --gphotos-token-url string Token server url - --hasher-auto-size SizeSuffix Auto-update checksum for files smaller than this size (disabled by default) - --hasher-hashes CommaSepList Comma separated list of supported checksum types (default md5,sha1) - --hasher-max-age Duration Maximum time to keep checksums in cache (0 = no cache, off = cache forever) (default off) - --hasher-remote string Remote to cache checksums for (e.g. 
myRemote:path) - --hdfs-data-transfer-protection string Kerberos data transfer protection: authentication|integrity|privacy - --hdfs-encoding MultiEncoder The encoding for the backend (default Slash,Colon,Del,Ctl,InvalidUtf8,Dot) - --hdfs-namenode string Hadoop name node and port - --hdfs-service-principal-name string Kerberos service principal name for the namenode - --hdfs-username string Hadoop user name - --hidrive-auth-url string Auth server URL - --hidrive-chunk-size SizeSuffix Chunksize for chunked uploads (default 48Mi) - --hidrive-client-id string OAuth Client Id - --hidrive-client-secret string OAuth Client Secret - --hidrive-disable-fetching-member-count Do not fetch number of objects in directories unless it is absolutely necessary - --hidrive-encoding MultiEncoder The encoding for the backend (default Slash,Dot) - --hidrive-endpoint string Endpoint for the service (default "https://api.hidrive.strato.com/2.1") - --hidrive-root-prefix string The root/parent folder for all paths (default "/") - --hidrive-scope-access string Access permissions that rclone should use when requesting access from HiDrive (default "rw") - --hidrive-scope-role string User-level that rclone should use when requesting access from HiDrive (default "user") - --hidrive-token string OAuth Access Token as a JSON blob - --hidrive-token-url string Token server url - --hidrive-upload-concurrency int Concurrency for chunked uploads (default 4) - --hidrive-upload-cutoff SizeSuffix Cutoff/Threshold for chunked uploads (default 96Mi) - --http-headers CommaSepList Set HTTP headers for all transactions - --http-no-head Don't use HEAD requests - --http-no-slash Set this if the site doesn't end directories with / - --http-url string URL of HTTP host to connect to - --internetarchive-access-key-id string IAS3 Access Key - --internetarchive-disable-checksum Don't ask the server to test against MD5 checksum calculated by rclone (default true) - --internetarchive-encoding MultiEncoder The encoding 
for the backend (default Slash,LtGt,CrLf,Del,Ctl,InvalidUtf8,Dot) - --internetarchive-endpoint string IAS3 Endpoint (default "https://s3.us.archive.org") - --internetarchive-front-endpoint string Host of InternetArchive Frontend (default "https://archive.org") - --internetarchive-secret-access-key string IAS3 Secret Key (password) - --internetarchive-wait-archive Duration Timeout for waiting the server's processing tasks (specifically archive and book_op) to finish (default 0s) - --jottacloud-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot) - --jottacloud-hard-delete Delete files permanently rather than putting them into the trash - --jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required (default 10Mi) - --jottacloud-no-versions Avoid server side versioning by deleting files and recreating files instead of overwriting them - --jottacloud-trashed-only Only show files that are in the trash - --jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fail's (default 10Mi) - --koofr-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot) - --koofr-endpoint string The Koofr API endpoint to use - --koofr-mountid string Mount ID of the mount to use - --koofr-password string Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password) (obscured) - --koofr-provider string Choose your storage provider - --koofr-setmtime Does the backend support setting modification time (default true) - --koofr-user string Your user name - -l, --links Translate symlinks to/from regular files with a '.rclonelink' extension - --local-case-insensitive Force the filesystem to report itself as case insensitive - --local-case-sensitive Force the filesystem to report itself as case sensitive - --local-encoding MultiEncoder The 
encoding for the backend (default Slash,Dot) - --local-no-check-updated Don't check to see if the files change during upload - --local-no-preallocate Disable preallocation of disk space for transferred files - --local-no-set-modtime Disable setting modtime - --local-no-sparse Disable sparse files for multi-thread downloads - --local-nounc Disable UNC (long path names) conversion on Windows - --local-unicode-normalization Apply unicode NFC normalization to paths and filenames - --local-zero-size-links Assume the Stat size of links is zero (and read them instead) (deprecated) - --mailru-check-hash What should copy do if file checksum is mismatched or invalid (default true) - --mailru-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot) - --mailru-pass string Password (obscured) - --mailru-speedup-enable Skip full upload if there is another file with same data hash (default true) - --mailru-speedup-file-patterns string Comma separated list of file name patterns eligible for speedup (put by hash) (default "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf") - --mailru-speedup-max-disk SizeSuffix This option allows you to disable speedup (put by hash) for large files (default 3Gi) - --mailru-speedup-max-memory SizeSuffix Files larger than the size given below will always be hashed on disk (default 32Mi) - --mailru-user string User name (usually email) - --mega-debug Output more debug from Mega - --mega-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot) - --mega-hard-delete Delete files permanently rather than putting them into the trash - --mega-pass string Password (obscured) - --mega-user string User name - --netstorage-account string Set the NetStorage account name - --netstorage-host string Domain+path of NetStorage host to connect to - --netstorage-protocol string Select between HTTP or HTTPS protocol (default "https") - --netstorage-secret 
string Set the NetStorage account secret/G2O key for authentication (obscured) - -x, --one-file-system Don't cross filesystem boundaries (unix/macOS only) - --onedrive-access-scopes SpaceSepList Set scopes to be requested by rclone (default Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access) - --onedrive-auth-url string Auth server URL - --onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k (327,680 bytes) (default 10Mi) - --onedrive-client-id string OAuth Client Id - --onedrive-client-secret string OAuth Client Secret - --onedrive-drive-id string The ID of the drive to use - --onedrive-drive-type string The type of the drive (personal | business | documentLibrary) - --onedrive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot) - --onedrive-expose-onenote-files Set to make OneNote files show up in directory listings - --onedrive-link-password string Set the password for links created by the link command - --onedrive-link-scope string Set the scope of the links created by the link command (default "anonymous") - --onedrive-link-type string Set the type of the links created by the link command (default "view") - --onedrive-list-chunk int Size of listing chunk (default 1000) - --onedrive-no-versions Remove all versions on modifying operations - --onedrive-region string Choose national cloud region for OneDrive (default "global") - --onedrive-root-folder-id string ID of the root folder - --onedrive-server-side-across-configs Allow server-side operations (e.g. 
copy) to work across different onedrive configs - --onedrive-token string OAuth Access Token as a JSON blob - --onedrive-token-url string Token server url - --oos-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi) - --oos-compartment string Object storage compartment OCID - --oos-config-file string Path to OCI config file (default "~/.oci/config") - --oos-config-profile string Profile name inside the oci config file (default "Default") - --oos-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi) - --oos-copy-timeout Duration Timeout for copy (default 1m0s) - --oos-disable-checksum Don't store MD5 checksum with object metadata - --oos-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot) - --oos-endpoint string Endpoint for Object storage API - --oos-leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery - --oos-namespace string Object storage namespace - --oos-no-check-bucket If set, don't attempt to check the bucket exists or create it - --oos-provider string Choose your Auth Provider (default "env_auth") - --oos-region string Object storage Region - --oos-upload-concurrency int Concurrency for multipart uploads (default 10) - --oos-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi) - --opendrive-chunk-size SizeSuffix Files will be uploaded in chunks this size (default 10Mi) - --opendrive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot) - --opendrive-password string Password (obscured) - --opendrive-username string Username - --pcloud-auth-url string Auth server URL - --pcloud-client-id string OAuth Client Id - --pcloud-client-secret string OAuth Client Secret - --pcloud-encoding MultiEncoder The encoding for the backend (default 
Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot) - --pcloud-hostname string Hostname to connect to (default "api.pcloud.com") - --pcloud-password string Your pcloud password (obscured) - --pcloud-root-folder-id string Fill in for rclone to use a non root folder as its starting point (default "d0") - --pcloud-token string OAuth Access Token as a JSON blob - --pcloud-token-url string Token server url - --pcloud-username string Your pcloud username - --premiumizeme-encoding MultiEncoder The encoding for the backend (default Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot) - --putio-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot) - --qingstor-access-key-id string QingStor Access Key ID - --qingstor-chunk-size SizeSuffix Chunk size to use for uploading (default 4Mi) - --qingstor-connection-retries int Number of connection retries (default 3) - --qingstor-encoding MultiEncoder The encoding for the backend (default Slash,Ctl,InvalidUtf8) - --qingstor-endpoint string Enter an endpoint URL to connection QingStor API - --qingstor-env-auth Get QingStor credentials from runtime - --qingstor-secret-access-key string QingStor Secret Access Key (password) - --qingstor-upload-concurrency int Concurrency for multipart uploads (default 1) - --qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi) - --qingstor-zone string Zone to connect to - --s3-access-key-id string AWS Access Key ID - --s3-acl string Canned ACL used when creating buckets and storing or copying objects - --s3-bucket-acl string Canned ACL used when creating buckets - --s3-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi) - --s3-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi) - --s3-decompress If set this will decompress gzip encoded objects - --s3-disable-checksum Don't store MD5 checksum with object metadata - --s3-disable-http2 Disable usage of http2 for S3 backends - 
--s3-download-url string Custom endpoint for downloads - --s3-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot) - --s3-endpoint string Endpoint for S3 API - --s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars) - --s3-force-path-style If true use path style access if false use virtual hosted style (default true) - --s3-leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery - --s3-list-chunk int Size of listing chunk (response list for each ListObject S3 request) (default 1000) - --s3-list-url-encode Tristate Whether to url encode listings: true/false/unset (default unset) - --s3-list-version int Version of ListObjects to use: 1,2 or 0 for auto - --s3-location-constraint string Location constraint - must be set to match the Region - --s3-max-upload-parts int Maximum number of parts in a multipart upload (default 10000) - --s3-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s) - --s3-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool - --s3-no-check-bucket If set, don't attempt to check the bucket exists or create it - --s3-no-head If set, don't HEAD uploaded objects to check integrity - --s3-no-head-object If set, do not do HEAD before GET when getting objects - --s3-no-system-metadata Suppress setting and reading of system metadata - --s3-profile string Profile to use in the shared credentials file - --s3-provider string Choose your S3 provider - --s3-region string Region to connect to - --s3-requester-pays Enables requester pays option when interacting with S3 bucket - --s3-secret-access-key string AWS Secret Access Key (password) - --s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3 - --s3-session-token string An AWS session token - --s3-shared-credentials-file string Path 
to the shared credentials file - --s3-sse-customer-algorithm string If using SSE-C, the server-side encryption algorithm used when storing this object in S3 - --s3-sse-customer-key string To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data - --s3-sse-customer-key-base64 string If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data - --s3-sse-customer-key-md5 string If using SSE-C you may provide the secret encryption key MD5 checksum (optional) - --s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key - --s3-storage-class string The storage class to use when storing new objects in S3 - --s3-upload-concurrency int Concurrency for multipart uploads (default 4) - --s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi) - --s3-use-accelerate-endpoint If true use the AWS S3 accelerated endpoint - --s3-use-multipart-etag Tristate Whether to use ETag in multipart uploads for verification (default unset) - --s3-use-presigned-request Whether to use a presigned request or PutObject for single part uploads - --s3-v2-auth If true use v2 authentication - --s3-version-at Time Show file versions as they were at the specified time (default off) - --s3-versions Include old versions in directory listings - --seafile-2fa Two-factor authentication ('true' if the account has 2FA enabled) - --seafile-create-library Should rclone create a library if it doesn't exist - --seafile-encoding MultiEncoder The encoding for the backend (default Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8) - --seafile-library string Name of the library - --seafile-library-key string Library password (for encrypted libraries only) (obscured) - --seafile-pass string Password (obscured) - --seafile-url string URL of seafile host to connect to - --seafile-user string User name (usually email address) - --sftp-ask-password Allow asking for SFTP password when needed - 
--sftp-chunk-size SizeSuffix Upload and download chunk size (default 32Ki) - --sftp-concurrency int The maximum number of outstanding requests for one file (default 64) - --sftp-disable-concurrent-reads If set don't use concurrent reads - --sftp-disable-concurrent-writes If set don't use concurrent writes - --sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available - --sftp-host string SSH host to connect to - --sftp-idle-timeout Duration Max time before closing idle connections (default 1m0s) - --sftp-key-file string Path to PEM-encoded private key file - --sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file (obscured) - --sftp-key-pem string Raw PEM-encoded private key - --sftp-key-use-agent When set forces the usage of the ssh-agent - --sftp-known-hosts-file string Optional path to known_hosts file - --sftp-md5sum-command string The command used to read md5 hashes - --sftp-pass string SSH password, leave blank to use ssh-agent (obscured) - --sftp-path-override string Override path used by SSH shell commands - --sftp-port int SSH port number (default 22) - --sftp-pubkey-file string Optional path to public key file - --sftp-server-command string Specifies the path or command to run a sftp server on the remote host - --sftp-set-env SpaceSepList Environment variables to pass to sftp and commands - --sftp-set-modtime Set the modified time on the remote if set (default true) - --sftp-sha1sum-command string The command used to read sha1 hashes - --sftp-shell-type string The type of SSH shell on remote server, if any - --sftp-skip-links Set to skip any symlinks and any other non regular files - --sftp-subsystem string Specifies the SSH2 subsystem on the remote host (default "sftp") - --sftp-use-fstat If set use fstat instead of stat - --sftp-use-insecure-cipher Enable the use of insecure ciphers and key exchange methods - --sftp-user string SSH username (default "$USER") - 
--sharefile-chunk-size SizeSuffix Upload chunk size (default 64Mi) - --sharefile-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,LeftPeriod,RightSpace,RightPeriod,InvalidUtf8,Dot) - --sharefile-endpoint string Endpoint for API calls - --sharefile-root-folder-id string ID of the root folder - --sharefile-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (default 128Mi) - --sia-api-password string Sia Daemon API Password (obscured) - --sia-api-url string Sia daemon API URL, like http://sia.daemon.host:9980 (default "http://127.0.0.1:9980") - --sia-encoding MultiEncoder The encoding for the backend (default Slash,Question,Hash,Percent,Del,Ctl,InvalidUtf8,Dot) - --sia-user-agent string Siad User Agent (default "Sia-Agent") - --skip-links Don't warn about skipped symlinks - --smb-case-insensitive Whether the server is configured to be case-insensitive (default true) - --smb-domain string Domain name for NTLM authentication (default "WORKGROUP") - --smb-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot) - --smb-hide-special-share Hide special shares (e.g. 
print$) which users aren't supposed to access (default true) - --smb-host string SMB server hostname to connect to - --smb-idle-timeout Duration Max time before closing idle connections (default 1m0s) - --smb-pass string SMB password (obscured) - --smb-port int SMB port number (default 445) - --smb-user string SMB username (default "$USER") - --storj-access-grant string Access grant - --storj-api-key string API key - --storj-passphrase string Encryption passphrase - --storj-provider string Choose an authentication method (default "existing") - --storj-satellite-address string Satellite address (default "us-central-1.storj.io") - --sugarsync-access-key-id string Sugarsync Access Key ID - --sugarsync-app-id string Sugarsync App ID - --sugarsync-authorization string Sugarsync authorization - --sugarsync-authorization-expiry string Sugarsync authorization expiry - --sugarsync-deleted-id string Sugarsync deleted folder id - --sugarsync-encoding MultiEncoder The encoding for the backend (default Slash,Ctl,InvalidUtf8,Dot) - --sugarsync-hard-delete Permanently delete files if true - --sugarsync-private-access-key string Sugarsync Private Access Key - --sugarsync-refresh-token string Sugarsync refresh token - --sugarsync-root-id string Sugarsync root id - --sugarsync-user string Sugarsync user - --swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID) - --swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME) - --swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET) - --swift-auth string Authentication URL for server (OS_AUTH_URL) - --swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN) - --swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION) - --swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments 
container (default 5Gi) - --swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME) - --swift-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8) - --swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public") - --swift-env-auth Get swift credentials from environment variables in standard OpenStack form - --swift-key string API key or password (OS_PASSWORD) - --swift-leave-parts-on-error If true avoid calling abort upload on a failure - --swift-no-chunk Don't chunk files during streaming upload - --swift-no-large-objects Disable support for static and dynamic large objects - --swift-region string Region name - optional (OS_REGION_NAME) - --swift-storage-policy string The storage policy to use when creating a new container - --swift-storage-url string Storage URL - optional (OS_STORAGE_URL) - --swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME) - --swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME) - --swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID) - --swift-user string User name to log in (OS_USERNAME) - --swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID) - --union-action-policy string Policy to choose upstream on ACTION category (default "epall") - --union-cache-time int Cache time of usage and free space (in seconds) (default 120) - --union-create-policy string Policy to choose upstream on CREATE category (default "epmfs") - --union-min-free-space SizeSuffix Minimum viable free space for lfs/eplfs policies (default 1Gi) - --union-search-policy string Policy to choose upstream on SEARCH category (default "ff") - --union-upstreams string List of space separated upstreams - --uptobox-access-token string Your access token - 
--uptobox-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot) - --webdav-bearer-token string Bearer token instead of user/pass (e.g. a Macaroon) - --webdav-bearer-token-command string Command to run to get a bearer token - --webdav-encoding string The encoding for the backend - --webdav-headers CommaSepList Set HTTP headers for all transactions - --webdav-pass string Password (obscured) - --webdav-url string URL of http host to connect to - --webdav-user string User name - --webdav-vendor string Name of the WebDAV site/service/software you are using - --yandex-auth-url string Auth server URL - --yandex-client-id string OAuth Client Id - --yandex-client-secret string OAuth Client Secret - --yandex-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot) - --yandex-hard-delete Delete files permanently rather than putting them into the trash - --yandex-token string OAuth Access Token as a JSON blob - --yandex-token-url string Token server url - --zoho-auth-url string Auth server URL - --zoho-client-id string OAuth Client Id - --zoho-client-secret string OAuth Client Secret - --zoho-encoding MultiEncoder The encoding for the backend (default Del,Ctl,InvalidUtf8) - --zoho-region string Zoho region to connect to - --zoho-token string OAuth Access Token as a JSON blob - --zoho-token-url string Token server url + --acd-auth-url string Auth server URL + --acd-client-id string OAuth Client Id + --acd-client-secret string OAuth Client Secret + --acd-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot) + --acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink (default 9Gi) + --acd-token string OAuth Access Token as a JSON blob + --acd-token-url string Token server url + --acd-upload-wait-per-gb Duration Additional time per GiB to wait after a failed complete upload to see if it appears (default 3m0s) + 
--alias-remote string Remote or path to alias + --azureblob-access-tier string Access tier of blob: hot, cool or archive + --azureblob-account string Azure Storage Account Name + --azureblob-archive-tier-delete Delete archive tier blobs before overwriting + --azureblob-chunk-size SizeSuffix Upload chunk size (default 4Mi) + --azureblob-client-certificate-password string Password for the certificate file (optional) (obscured) + --azureblob-client-certificate-path string Path to a PEM or PKCS12 certificate file including the private key + --azureblob-client-id string The ID of the client in use + --azureblob-client-secret string One of the service principal's client secrets + --azureblob-client-send-certificate-chain Send the certificate chain when using certificate auth + --azureblob-disable-checksum Don't store MD5 checksum with object metadata + --azureblob-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8) + --azureblob-endpoint string Endpoint for the service + --azureblob-env-auth Read credentials from runtime (environment variables, CLI or MSI) + --azureblob-key string Storage Account Shared Key + --azureblob-list-chunk int Size of blob list (default 5000) + --azureblob-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s) + --azureblob-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool + --azureblob-msi-client-id string Object ID of the user-assigned MSI to use, if any + --azureblob-msi-mi-res-id string Azure resource ID of the user-assigned MSI to use, if any + --azureblob-msi-object-id string Object ID of the user-assigned MSI to use, if any + --azureblob-no-check-container If set, don't attempt to check the container exists or create it + --azureblob-no-head-object If set, do not do HEAD before GET when getting objects + --azureblob-password string The user's password (obscured) + --azureblob-public-access string Public access 
level of a container: blob or container + --azureblob-sas-url string SAS URL for container level access only + --azureblob-service-principal-file string Path to file containing credentials for use with a service principal + --azureblob-tenant string ID of the service principal's tenant. Also called its directory ID + --azureblob-upload-concurrency int Concurrency for multipart uploads (default 16) + --azureblob-upload-cutoff string Cutoff for switching to chunked upload (<= 256 MiB) (deprecated) + --azureblob-use-emulator Uses local storage emulator if provided as 'true' + --azureblob-use-msi Use a managed service identity to authenticate (only works in Azure) + --azureblob-username string User name (usually an email address) + --b2-account string Account ID or Application Key ID + --b2-chunk-size SizeSuffix Upload chunk size (default 96Mi) + --b2-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4Gi) + --b2-disable-checksum Disable checksums for large (> upload cutoff) files + --b2-download-auth-duration Duration Time before the authorization token will expire in s or suffix ms|s|m|h|d (default 1w) + --b2-download-url string Custom endpoint for downloads + --b2-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot) + --b2-endpoint string Endpoint for the service + --b2-hard-delete Permanently delete files on remote removal, otherwise hide files + --b2-key string Application Key + --b2-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s) + --b2-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool + --b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging + --b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi) + --b2-version-at Time Show file versions as they were at the specified time (default off) + --b2-versions Include old versions in directory listings + --box-access-token 
string Box App Primary Access Token + --box-auth-url string Auth server URL + --box-box-config-file string Box App config.json location + --box-box-sub-type string (default "user") + --box-client-id string OAuth Client Id + --box-client-secret string OAuth Client Secret + --box-commit-retries int Max number of times to try committing a multipart file (default 100) + --box-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot) + --box-list-chunk int Size of listing chunk 1-1000 (default 1000) + --box-owned-by string Only show items owned by the login (email address) passed in + --box-root-folder-id string Fill in for rclone to use a non root folder as its starting point + --box-token string OAuth Access Token as a JSON blob + --box-token-url string Token server url + --box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50 MiB) (default 50Mi) + --cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage (default 1m0s) + --cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming + --cache-chunk-path string Directory to cache chunk files (default "$HOME/.cache/rclone/cache-backend") + --cache-chunk-size SizeSuffix The size of a chunk (partial file data) (default 5Mi) + --cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk (default 10Gi) + --cache-db-path string Directory to store file structure metadata DB (default "$HOME/.cache/rclone/cache-backend") + --cache-db-purge Clear all the cached data for this remote on start + --cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s) + --cache-info-age Duration How long to cache file structure information (directory listings, file size, times, etc.) 
(default 6h0m0s) + --cache-plex-insecure string Skip all certificate verification when connecting to the Plex server + --cache-plex-password string The password of the Plex user (obscured) + --cache-plex-url string The URL of the Plex server + --cache-plex-username string The username of the Plex user + --cache-read-retries int How many times to retry a read from a cache storage (default 10) + --cache-remote string Remote to cache + --cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1) + --cache-tmp-upload-path string Directory to keep temporary files until they are uploaded + --cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s) + --cache-workers int How many workers should run in parallel to download chunks (default 4) + --cache-writes Cache file data on writes through the FS + --chunker-chunk-size SizeSuffix Files larger than chunk size will be split in chunks (default 2Gi) + --chunker-fail-hard Choose how chunker should handle files with missing or invalid chunks + --chunker-hash-type string Choose how chunker handles hash sums (default "md5") + --chunker-remote string Remote to chunk/unchunk + --combine-upstreams SpaceSepList Upstreams for combining + --compress-level int GZIP compression level (-2 to 9) (default -1) + --compress-mode string Compression mode (default "gzip") + --compress-ram-cache-limit SizeSuffix Some remotes don't allow the upload of files with unknown size (default 20Mi) + --compress-remote string Remote to compress + -L, --copy-links Follow symlinks and copy the pointed to item + --crypt-directory-name-encryption Option to either encrypt directory names or leave them intact (default true) + --crypt-filename-encoding string How to encode the encrypted filename to text string (default "base32") + --crypt-filename-encryption string How to encrypt the filenames (default "standard") + --crypt-no-data-encryption Option to either encrypt 
file data or leave it unencrypted + --crypt-password string Password or pass phrase for encryption (obscured) + --crypt-password2 string Password or pass phrase for salt (obscured) + --crypt-remote string Remote to encrypt/decrypt + --crypt-server-side-across-configs Allow server-side operations (e.g. copy) to work across different crypt configs + --crypt-show-mapping For all files listed show how the names encrypt + --drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded + --drive-allow-import-name-change Allow the filetype to change when uploading Google docs + --drive-auth-owner-only Only consider files owned by the authenticated user + --drive-auth-url string Auth server URL + --drive-chunk-size SizeSuffix Upload chunk size (default 8Mi) + --drive-client-id string Google Application Client Id + --drive-client-secret string OAuth Client Secret + --drive-copy-shortcut-content Server side copy contents of shortcuts instead of the shortcut + --drive-disable-http2 Disable drive using http2 (default true) + --drive-encoding MultiEncoder The encoding for the backend (default InvalidUtf8) + --drive-export-formats string Comma separated list of preferred formats for downloading Google docs (default "docx,xlsx,pptx,svg") + --drive-formats string Deprecated: See export_formats + --drive-impersonate string Impersonate this user when using a service account + --drive-import-formats string Comma separated list of preferred formats for uploading Google docs + --drive-keep-revision-forever Keep new head revision of each file forever + --drive-list-chunk int Size of listing chunk 100-1000, 0 to disable (default 1000) + --drive-pacer-burst int Number of API calls to allow without sleeping (default 100) + --drive-pacer-min-sleep Duration Minimum time to sleep between API calls (default 100ms) + --drive-resource-key string Resource key for accessing a link-shared file + --drive-root-folder-id string ID of the root folder + --drive-scope 
string Scope that rclone should use when requesting access from drive + --drive-server-side-across-configs Allow server-side operations (e.g. copy) to work across different drive configs + --drive-service-account-credentials string Service Account Credentials JSON blob + --drive-service-account-file string Service Account Credentials JSON file path + --drive-shared-with-me Only show files that are shared with me + --drive-size-as-quota Show sizes as storage quota usage, not actual size + --drive-skip-checksum-gphotos Skip MD5 checksum on Google photos and videos only + --drive-skip-dangling-shortcuts If set skip dangling shortcut files + --drive-skip-gdocs Skip google documents in all listings + --drive-skip-shortcuts If set skip shortcut files + --drive-starred-only Only show files that are starred + --drive-stop-on-download-limit Make download limit errors be fatal + --drive-stop-on-upload-limit Make upload limit errors be fatal + --drive-team-drive string ID of the Shared Drive (Team Drive) + --drive-token string OAuth Access Token as a JSON blob + --drive-token-url string Token server url + --drive-trashed-only Only show files that are in the trash + --drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8Mi) + --drive-use-created-date Use file created date instead of modified date + --drive-use-shared-date Use date file was shared instead of modified date + --drive-use-trash Send files to the trash instead of deleting permanently (default true) + --drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download (default off) + --dropbox-auth-url string Auth server URL + --dropbox-batch-commit-timeout Duration Max time to wait for a batch to finish committing (default 10m0s) + --dropbox-batch-mode string Upload file batching sync|async|off (default "sync") + --dropbox-batch-size int Max number of files in upload batch + --dropbox-batch-timeout Duration Max time to allow an idle upload batch before uploading 
(default 0s) + --dropbox-chunk-size SizeSuffix Upload chunk size (< 150Mi) (default 48Mi) + --dropbox-client-id string OAuth Client Id + --dropbox-client-secret string OAuth Client Secret + --dropbox-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot) + --dropbox-impersonate string Impersonate this user when using a business account + --dropbox-shared-files Instructs rclone to work on individual shared files + --dropbox-shared-folders Instructs rclone to work on shared folders + --dropbox-token string OAuth Access Token as a JSON blob + --dropbox-token-url string Token server url + --fichier-api-key string Your API Key, get it from https://1fichier.com/console/params.pl + --fichier-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot) + --fichier-file-password string If you want to download a shared file that is password protected, add this parameter (obscured) + --fichier-folder-password string If you want to list the files in a shared folder that is password protected, add this parameter (obscured) + --fichier-shared-folder string If you want to download a shared folder, add this parameter + --filefabric-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot) + --filefabric-permanent-token string Permanent Authentication Token + --filefabric-root-folder-id string ID of the root folder + --filefabric-token string Session Token + --filefabric-token-expiry string Token expiry time + --filefabric-url string URL of the Enterprise File Fabric to connect to + --filefabric-version string Version read from the file fabric + --ftp-ask-password Allow asking for FTP password when needed + --ftp-close-timeout Duration Maximum time to wait for a response to close (default 1m0s) + --ftp-concurrency int Maximum number of FTP simultaneous connections, 0 for unlimited + 
--ftp-disable-epsv Disable using EPSV even if server advertises support + --ftp-disable-mlsd Disable using MLSD even if server advertises support + --ftp-disable-tls13 Disable TLS 1.3 (workaround for FTP servers with buggy TLS) + --ftp-disable-utf8 Disable using UTF-8 even if server advertises support + --ftp-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,RightSpace,Dot) + --ftp-explicit-tls Use Explicit FTPS (FTP over TLS) + --ftp-force-list-hidden Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD + --ftp-host string FTP host to connect to + --ftp-idle-timeout Duration Max time before closing idle connections (default 1m0s) + --ftp-no-check-certificate Do not verify the TLS certificate of the server + --ftp-pass string FTP password (obscured) + --ftp-port int FTP port number (default 21) + --ftp-shut-timeout Duration Maximum time to wait for data connection closing status (default 1m0s) + --ftp-tls Use Implicit FTPS (FTP over TLS) + --ftp-tls-cache-size int Size of TLS session cache for all control and data connections (default 32) + --ftp-user string FTP username (default "$USER") + --ftp-writing-mdtm Use MDTM to set modification time (VsFtpd quirk) + --gcs-anonymous Access public buckets and objects without credentials + --gcs-auth-url string Auth server URL + --gcs-bucket-acl string Access Control List for new buckets + --gcs-bucket-policy-only Access checks should use bucket-level IAM policies + --gcs-client-id string OAuth Client Id + --gcs-client-secret string OAuth Client Secret + --gcs-decompress If set this will decompress gzip encoded objects + --gcs-encoding MultiEncoder The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot) + --gcs-endpoint string Endpoint for the service + --gcs-location string Location for the newly created buckets + --gcs-no-check-bucket If set, don't attempt to check the bucket exists or create it + --gcs-object-acl string Access Control List for new 
objects + --gcs-project-number string Project number + --gcs-service-account-file string Service Account Credentials JSON file path + --gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage + --gcs-token string OAuth Access Token as a JSON blob + --gcs-token-url string Token server url + --gphotos-auth-url string Auth server URL + --gphotos-client-id string OAuth Client Id + --gphotos-client-secret string OAuth Client Secret + --gphotos-encoding MultiEncoder The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot) + --gphotos-include-archived Also view and download archived media + --gphotos-read-only Set to make the Google Photos backend read only + --gphotos-read-size Set to read the size of media items + --gphotos-start-year int Year limits the photos to be downloaded to those which are uploaded after the given year (default 2000) + --gphotos-token string OAuth Access Token as a JSON blob + --gphotos-token-url string Token server url + --hasher-auto-size SizeSuffix Auto-update checksum for files smaller than this size (disabled by default) + --hasher-hashes CommaSepList Comma separated list of supported checksum types (default md5,sha1) + --hasher-max-age Duration Maximum time to keep checksums in cache (0 = no cache, off = cache forever) (default off) + --hasher-remote string Remote to cache checksums for (e.g. 
myRemote:path) + --hdfs-data-transfer-protection string Kerberos data transfer protection: authentication|integrity|privacy + --hdfs-encoding MultiEncoder The encoding for the backend (default Slash,Colon,Del,Ctl,InvalidUtf8,Dot) + --hdfs-namenode string Hadoop name node and port + --hdfs-service-principal-name string Kerberos service principal name for the namenode + --hdfs-username string Hadoop user name + --hidrive-auth-url string Auth server URL + --hidrive-chunk-size SizeSuffix Chunksize for chunked uploads (default 48Mi) + --hidrive-client-id string OAuth Client Id + --hidrive-client-secret string OAuth Client Secret + --hidrive-disable-fetching-member-count Do not fetch number of objects in directories unless it is absolutely necessary + --hidrive-encoding MultiEncoder The encoding for the backend (default Slash,Dot) + --hidrive-endpoint string Endpoint for the service (default "https://api.hidrive.strato.com/2.1") + --hidrive-root-prefix string The root/parent folder for all paths (default "/") + --hidrive-scope-access string Access permissions that rclone should use when requesting access from HiDrive (default "rw") + --hidrive-scope-role string User-level that rclone should use when requesting access from HiDrive (default "user") + --hidrive-token string OAuth Access Token as a JSON blob + --hidrive-token-url string Token server url + --hidrive-upload-concurrency int Concurrency for chunked uploads (default 4) + --hidrive-upload-cutoff SizeSuffix Cutoff/Threshold for chunked uploads (default 96Mi) + --http-headers CommaSepList Set HTTP headers for all transactions + --http-no-head Don't use HEAD requests + --http-no-slash Set this if the site doesn't end directories with / + --http-url string URL of HTTP host to connect to + --internetarchive-access-key-id string IAS3 Access Key + --internetarchive-disable-checksum Don't ask the server to test against MD5 checksum calculated by rclone (default true) + --internetarchive-encoding MultiEncoder The encoding 
for the backend (default Slash,LtGt,CrLf,Del,Ctl,InvalidUtf8,Dot) + --internetarchive-endpoint string IAS3 Endpoint (default "https://s3.us.archive.org") + --internetarchive-front-endpoint string Host of InternetArchive Frontend (default "https://archive.org") + --internetarchive-secret-access-key string IAS3 Secret Key (password) + --internetarchive-wait-archive Duration Timeout for waiting the server's processing tasks (specifically archive and book_op) to finish (default 0s) + --jottacloud-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot) + --jottacloud-hard-delete Delete files permanently rather than putting them into the trash + --jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required (default 10Mi) + --jottacloud-no-versions Avoid server side versioning by deleting files and recreating files instead of overwriting them + --jottacloud-trashed-only Only show files that are in the trash + --jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fail's (default 10Mi) + --koofr-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot) + --koofr-endpoint string The Koofr API endpoint to use + --koofr-mountid string Mount ID of the mount to use + --koofr-password string Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password) (obscured) + --koofr-provider string Choose your storage provider + --koofr-setmtime Does the backend support setting modification time (default true) + --koofr-user string Your user name + -l, --links Translate symlinks to/from regular files with a '.rclonelink' extension + --local-case-insensitive Force the filesystem to report itself as case insensitive + --local-case-sensitive Force the filesystem to report itself as case sensitive + --local-encoding MultiEncoder The 
encoding for the backend (default Slash,Dot) + --local-no-check-updated Don't check to see if the files change during upload + --local-no-preallocate Disable preallocation of disk space for transferred files + --local-no-set-modtime Disable setting modtime + --local-no-sparse Disable sparse files for multi-thread downloads + --local-nounc Disable UNC (long path names) conversion on Windows + --local-unicode-normalization Apply unicode NFC normalization to paths and filenames + --local-zero-size-links Assume the Stat size of links is zero (and read them instead) (deprecated) + --mailru-check-hash What should copy do if file checksum is mismatched or invalid (default true) + --mailru-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot) + --mailru-pass string Password (obscured) + --mailru-speedup-enable Skip full upload if there is another file with same data hash (default true) + --mailru-speedup-file-patterns string Comma separated list of file name patterns eligible for speedup (put by hash) (default "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf") + --mailru-speedup-max-disk SizeSuffix This option allows you to disable speedup (put by hash) for large files (default 3Gi) + --mailru-speedup-max-memory SizeSuffix Files larger than the size given below will always be hashed on disk (default 32Mi) + --mailru-user string User name (usually email) + --mega-debug Output more debug from Mega + --mega-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot) + --mega-hard-delete Delete files permanently rather than putting them into the trash + --mega-pass string Password (obscured) + --mega-user string User name + --netstorage-account string Set the NetStorage account name + --netstorage-host string Domain+path of NetStorage host to connect to + --netstorage-protocol string Select between HTTP or HTTPS protocol (default "https") + --netstorage-secret 
string Set the NetStorage account secret/G2O key for authentication (obscured) + -x, --one-file-system Don't cross filesystem boundaries (unix/macOS only) + --onedrive-access-scopes SpaceSepList Set scopes to be requested by rclone (default Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access) + --onedrive-auth-url string Auth server URL + --onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k (327,680 bytes) (default 10Mi) + --onedrive-client-id string OAuth Client Id + --onedrive-client-secret string OAuth Client Secret + --onedrive-drive-id string The ID of the drive to use + --onedrive-drive-type string The type of the drive (personal | business | documentLibrary) + --onedrive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot) + --onedrive-expose-onenote-files Set to make OneNote files show up in directory listings + --onedrive-link-password string Set the password for links created by the link command + --onedrive-link-scope string Set the scope of the links created by the link command (default "anonymous") + --onedrive-link-type string Set the type of the links created by the link command (default "view") + --onedrive-list-chunk int Size of listing chunk (default 1000) + --onedrive-no-versions Remove all versions on modifying operations + --onedrive-region string Choose national cloud region for OneDrive (default "global") + --onedrive-root-folder-id string ID of the root folder + --onedrive-server-side-across-configs Allow server-side operations (e.g. 
copy) to work across different onedrive configs + --onedrive-token string OAuth Access Token as a JSON blob + --onedrive-token-url string Token server url + --oos-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi) + --oos-compartment string Object storage compartment OCID + --oos-config-file string Path to OCI config file (default "~/.oci/config") + --oos-config-profile string Profile name inside the oci config file (default "Default") + --oos-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi) + --oos-copy-timeout Duration Timeout for copy (default 1m0s) + --oos-disable-checksum Don't store MD5 checksum with object metadata + --oos-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot) + --oos-endpoint string Endpoint for Object storage API + --oos-leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery + --oos-namespace string Object storage namespace + --oos-no-check-bucket If set, don't attempt to check the bucket exists or create it + --oos-provider string Choose your Auth Provider (default "env_auth") + --oos-region string Object storage Region + --oos-upload-concurrency int Concurrency for multipart uploads (default 10) + --oos-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi) + --opendrive-chunk-size SizeSuffix Files will be uploaded in chunks this size (default 10Mi) + --opendrive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot) + --opendrive-password string Password (obscured) + --opendrive-username string Username + --pcloud-auth-url string Auth server URL + --pcloud-client-id string OAuth Client Id + --pcloud-client-secret string OAuth Client Secret + --pcloud-encoding MultiEncoder The encoding for the backend (default 
Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot) + --pcloud-hostname string Hostname to connect to (default "api.pcloud.com") + --pcloud-password string Your pcloud password (obscured) + --pcloud-root-folder-id string Fill in for rclone to use a non root folder as its starting point (default "d0") + --pcloud-token string OAuth Access Token as a JSON blob + --pcloud-token-url string Token server url + --pcloud-username string Your pcloud username + --premiumizeme-encoding MultiEncoder The encoding for the backend (default Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot) + --putio-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot) + --qingstor-access-key-id string QingStor Access Key ID + --qingstor-chunk-size SizeSuffix Chunk size to use for uploading (default 4Mi) + --qingstor-connection-retries int Number of connection retries (default 3) + --qingstor-encoding MultiEncoder The encoding for the backend (default Slash,Ctl,InvalidUtf8) + --qingstor-endpoint string Enter an endpoint URL to connection QingStor API + --qingstor-env-auth Get QingStor credentials from runtime + --qingstor-secret-access-key string QingStor Secret Access Key (password) + --qingstor-upload-concurrency int Concurrency for multipart uploads (default 1) + --qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi) + --qingstor-zone string Zone to connect to + --s3-access-key-id string AWS Access Key ID + --s3-acl string Canned ACL used when creating buckets and storing or copying objects + --s3-bucket-acl string Canned ACL used when creating buckets + --s3-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi) + --s3-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi) + --s3-decompress If set this will decompress gzip encoded objects + --s3-disable-checksum Don't store MD5 checksum with object metadata + --s3-disable-http2 Disable usage of http2 for S3 backends + 
--s3-download-url string Custom endpoint for downloads + --s3-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot) + --s3-endpoint string Endpoint for S3 API + --s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars) + --s3-force-path-style If true use path style access if false use virtual hosted style (default true) + --s3-leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery + --s3-list-chunk int Size of listing chunk (response list for each ListObject S3 request) (default 1000) + --s3-list-url-encode Tristate Whether to url encode listings: true/false/unset (default unset) + --s3-list-version int Version of ListObjects to use: 1,2 or 0 for auto + --s3-location-constraint string Location constraint - must be set to match the Region + --s3-max-upload-parts int Maximum number of parts in a multipart upload (default 10000) + --s3-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s) + --s3-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool + --s3-might-gzip Tristate Set this if the backend might gzip objects (default unset) + --s3-no-check-bucket If set, don't attempt to check the bucket exists or create it + --s3-no-head If set, don't HEAD uploaded objects to check integrity + --s3-no-head-object If set, do not do HEAD before GET when getting objects + --s3-no-system-metadata Suppress setting and reading of system metadata + --s3-profile string Profile to use in the shared credentials file + --s3-provider string Choose your S3 provider + --s3-region string Region to connect to + --s3-requester-pays Enables requester pays option when interacting with S3 bucket + --s3-secret-access-key string AWS Secret Access Key (password) + --s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3 + 
--s3-session-token string An AWS session token + --s3-shared-credentials-file string Path to the shared credentials file + --s3-sse-customer-algorithm string If using SSE-C, the server-side encryption algorithm used when storing this object in S3 + --s3-sse-customer-key string To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data + --s3-sse-customer-key-base64 string If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data + --s3-sse-customer-key-md5 string If using SSE-C you may provide the secret encryption key MD5 checksum (optional) + --s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key + --s3-storage-class string The storage class to use when storing new objects in S3 + --s3-upload-concurrency int Concurrency for multipart uploads (default 4) + --s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi) + --s3-use-accelerate-endpoint If true use the AWS S3 accelerated endpoint + --s3-use-multipart-etag Tristate Whether to use ETag in multipart uploads for verification (default unset) + --s3-use-presigned-request Whether to use a presigned request or PutObject for single part uploads + --s3-v2-auth If true use v2 authentication + --s3-version-at Time Show file versions as they were at the specified time (default off) + --s3-versions Include old versions in directory listings + --seafile-2fa Two-factor authentication ('true' if the account has 2FA enabled) + --seafile-create-library Should rclone create a library if it doesn't exist + --seafile-encoding MultiEncoder The encoding for the backend (default Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8) + --seafile-library string Name of the library + --seafile-library-key string Library password (for encrypted libraries only) (obscured) + --seafile-pass string Password (obscured) + --seafile-url string URL of seafile host to connect to + --seafile-user string User name (usually 
email address) + --sftp-ask-password Allow asking for SFTP password when needed + --sftp-chunk-size SizeSuffix Upload and download chunk size (default 32Ki) + --sftp-ciphers SpaceSepList Space separated list of ciphers to be used for session encryption, ordered by preference + --sftp-concurrency int The maximum number of outstanding requests for one file (default 64) + --sftp-disable-concurrent-reads If set don't use concurrent reads + --sftp-disable-concurrent-writes If set don't use concurrent writes + --sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available + --sftp-host string SSH host to connect to + --sftp-idle-timeout Duration Max time before closing idle connections (default 1m0s) + --sftp-key-exchange SpaceSepList Space separated list of key exchange algorithms, ordered by preference + --sftp-key-file string Path to PEM-encoded private key file + --sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file (obscured) + --sftp-key-pem string Raw PEM-encoded private key + --sftp-key-use-agent When set forces the usage of the ssh-agent + --sftp-known-hosts-file string Optional path to known_hosts file + --sftp-macs SpaceSepList Space separated list of MACs (message authentication code) algorithms, ordered by preference + --sftp-md5sum-command string The command used to read md5 hashes + --sftp-pass string SSH password, leave blank to use ssh-agent (obscured) + --sftp-path-override string Override path used by SSH shell commands + --sftp-port int SSH port number (default 22) + --sftp-pubkey-file string Optional path to public key file + --sftp-server-command string Specifies the path or command to run a sftp server on the remote host + --sftp-set-env SpaceSepList Environment variables to pass to sftp and commands + --sftp-set-modtime Set the modified time on the remote if set (default true) + --sftp-sha1sum-command string The command used to read sha1 hashes + --sftp-shell-type 
string The type of SSH shell on remote server, if any + --sftp-skip-links Set to skip any symlinks and any other non regular files + --sftp-subsystem string Specifies the SSH2 subsystem on the remote host (default "sftp") + --sftp-use-fstat If set use fstat instead of stat + --sftp-use-insecure-cipher Enable the use of insecure ciphers and key exchange methods + --sftp-user string SSH username (default "$USER") + --sharefile-chunk-size SizeSuffix Upload chunk size (default 64Mi) + --sharefile-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,LeftPeriod,RightSpace,RightPeriod,InvalidUtf8,Dot) + --sharefile-endpoint string Endpoint for API calls + --sharefile-root-folder-id string ID of the root folder + --sharefile-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (default 128Mi) + --sia-api-password string Sia Daemon API Password (obscured) + --sia-api-url string Sia daemon API URL, like http://sia.daemon.host:9980 (default "http://127.0.0.1:9980") + --sia-encoding MultiEncoder The encoding for the backend (default Slash,Question,Hash,Percent,Del,Ctl,InvalidUtf8,Dot) + --sia-user-agent string Siad User Agent (default "Sia-Agent") + --skip-links Don't warn about skipped symlinks + --smb-case-insensitive Whether the server is configured to be case-insensitive (default true) + --smb-domain string Domain name for NTLM authentication (default "WORKGROUP") + --smb-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot) + --smb-hide-special-share Hide special shares (e.g. 
print$) which users aren't supposed to access (default true) + --smb-host string SMB server hostname to connect to + --smb-idle-timeout Duration Max time before closing idle connections (default 1m0s) + --smb-pass string SMB password (obscured) + --smb-port int SMB port number (default 445) + --smb-user string SMB username (default "$USER") + --storj-access-grant string Access grant + --storj-api-key string API key + --storj-passphrase string Encryption passphrase + --storj-provider string Choose an authentication method (default "existing") + --storj-satellite-address string Satellite address (default "us-central-1.storj.io") + --sugarsync-access-key-id string Sugarsync Access Key ID + --sugarsync-app-id string Sugarsync App ID + --sugarsync-authorization string Sugarsync authorization + --sugarsync-authorization-expiry string Sugarsync authorization expiry + --sugarsync-deleted-id string Sugarsync deleted folder id + --sugarsync-encoding MultiEncoder The encoding for the backend (default Slash,Ctl,InvalidUtf8,Dot) + --sugarsync-hard-delete Permanently delete files if true + --sugarsync-private-access-key string Sugarsync Private Access Key + --sugarsync-refresh-token string Sugarsync refresh token + --sugarsync-root-id string Sugarsync root id + --sugarsync-user string Sugarsync user + --swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID) + --swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME) + --swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET) + --swift-auth string Authentication URL for server (OS_AUTH_URL) + --swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN) + --swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION) + --swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments 
container (default 5Gi) + --swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME) + --swift-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8) + --swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public") + --swift-env-auth Get swift credentials from environment variables in standard OpenStack form + --swift-key string API key or password (OS_PASSWORD) + --swift-leave-parts-on-error If true avoid calling abort upload on a failure + --swift-no-chunk Don't chunk files during streaming upload + --swift-no-large-objects Disable support for static and dynamic large objects + --swift-region string Region name - optional (OS_REGION_NAME) + --swift-storage-policy string The storage policy to use when creating a new container + --swift-storage-url string Storage URL - optional (OS_STORAGE_URL) + --swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME) + --swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME) + --swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID) + --swift-user string User name to log in (OS_USERNAME) + --swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID) + --union-action-policy string Policy to choose upstream on ACTION category (default "epall") + --union-cache-time int Cache time of usage and free space (in seconds) (default 120) + --union-create-policy string Policy to choose upstream on CREATE category (default "epmfs") + --union-min-free-space SizeSuffix Minimum viable free space for lfs/eplfs policies (default 1Gi) + --union-search-policy string Policy to choose upstream on SEARCH category (default "ff") + --union-upstreams string List of space separated upstreams + --uptobox-access-token string Your access token + 
--uptobox-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot) + --webdav-bearer-token string Bearer token instead of user/pass (e.g. a Macaroon) + --webdav-bearer-token-command string Command to run to get a bearer token + --webdav-encoding string The encoding for the backend + --webdav-headers CommaSepList Set HTTP headers for all transactions + --webdav-pass string Password (obscured) + --webdav-url string URL of http host to connect to + --webdav-user string User name + --webdav-vendor string Name of the WebDAV site/service/software you are using + --yandex-auth-url string Auth server URL + --yandex-client-id string OAuth Client Id + --yandex-client-secret string OAuth Client Secret + --yandex-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot) + --yandex-hard-delete Delete files permanently rather than putting them into the trash + --yandex-token string OAuth Access Token as a JSON blob + --yandex-token-url string Token server url + --zoho-auth-url string Auth server URL + --zoho-client-id string OAuth Client Id + --zoho-client-secret string OAuth Client Secret + --zoho-encoding MultiEncoder The encoding for the backend (default Del,Ctl,InvalidUtf8) + --zoho-region string Zoho region to connect to + --zoho-token string OAuth Access Token as a JSON blob + --zoho-token-url string Token server url ``` # Docker Volume Plugin @@ -17054,9 +17296,10 @@ Token server url - leave blank to use Amazon's. token_url> Optional token URL Remote config Make sure your Redirect URL is set to "http://127.0.0.1:53682/" in your custom config. -Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine +Use web browser to automatically authenticate rclone with remote? 
+ * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access +If not sure try Y. If Y failed, try N. y) Yes n) No y/n> y @@ -17329,6 +17572,7 @@ The S3 backend can be used with a number of different providers: - IBM COS S3 - IDrive e2 - IONOS Cloud +- Liara Object Storage - Minio - Qiniu Cloud Object Storage (Kodo) - RackCorp Object Storage @@ -17385,7 +17629,7 @@ name> remote Type of storage to configure. Choose a number from below, or type in your own value [snip] -XX / Amazon S3 Compliant Storage Providers including AWS, Ceph, ChinaMobile, ArvanCloud, Dreamhost, IBM COS, Minio, and Tencent COS +XX / Amazon S3 Compliant Storage Providers including AWS, Ceph, ChinaMobile, ArvanCloud, Dreamhost, IBM COS, Liara, Minio, and Tencent COS \ "s3" [snip] Storage> s3 @@ -17395,7 +17639,7 @@ Choose a number from below, or type in your own value \ "AWS" 2 / Ceph Object Storage \ "Ceph" - 3 / Digital Ocean Spaces + 3 / DigitalOcean Spaces \ "DigitalOcean" 4 / Dreamhost DreamObjects \ "Dreamhost" @@ -17950,7 +18194,7 @@ A simple solution is to set the `--s3-upload-cutoff 0` and force all the files t ### Standard options -Here are the Standard options specific to s3 (Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi). +Here are the Standard options specific to s3 (Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi). 
#### --s3-provider @@ -17976,7 +18220,7 @@ Properties: - "ArvanCloud" - Arvan Cloud Object Storage (AOS) - "DigitalOcean" - - Digital Ocean Spaces + - DigitalOcean Spaces - "Dreamhost" - Dreamhost DreamObjects - "HuaweiOBS" @@ -17989,6 +18233,8 @@ Properties: - IONOS Cloud - "LyveCloud" - Seagate Lyve Cloud + - "Liara" + - Liara Object Storage - "Minio" - Minio Object Storage - "Netease" @@ -18340,7 +18586,7 @@ Properties: - Config: region - Env Var: RCLONE_S3_REGION -- Provider: !AWS,Alibaba,ChinaMobile,Cloudflare,IONOS,ArvanCloud,Qiniu,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive +- Provider: !AWS,Alibaba,ChinaMobile,Cloudflare,IONOS,ArvanCloud,Liara,Qiniu,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive - Type: string - Required: false - Examples: @@ -18619,6 +18865,22 @@ Properties: #### --s3-endpoint +Endpoint for Liara Object Storage API. + +Properties: + +- Config: endpoint +- Env Var: RCLONE_S3_ENDPOINT +- Provider: Liara +- Type: string +- Required: false +- Examples: + - "storage.iran.liara.space" + - The default endpoint + - Iran + +#### --s3-endpoint + Endpoint for OSS API. 
Properties: @@ -18919,18 +19181,24 @@ Properties: - Config: endpoint - Env Var: RCLONE_S3_ENDPOINT -- Provider: !AWS,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,ArvanCloud,Scaleway,StackPath,Storj,RackCorp,Qiniu +- Provider: !AWS,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,Liara,ArvanCloud,Scaleway,StackPath,Storj,RackCorp,Qiniu - Type: string - Required: false - Examples: - "objects-us-east-1.dream.io" - Dream Objects endpoint + - "syd1.digitaloceanspaces.com" + - DigitalOcean Spaces Sydney 1 + - "sfo3.digitaloceanspaces.com" + - DigitalOcean Spaces San Francisco 3 + - "fra1.digitaloceanspaces.com" + - DigitalOcean Spaces Frankfurt 1 - "nyc3.digitaloceanspaces.com" - - Digital Ocean Spaces New York 3 + - DigitalOcean Spaces New York 3 - "ams3.digitaloceanspaces.com" - - Digital Ocean Spaces Amsterdam 3 + - DigitalOcean Spaces Amsterdam 3 - "sgp1.digitaloceanspaces.com" - - Digital Ocean Spaces Singapore 1 + - DigitalOcean Spaces Singapore 1 - "localhost:8333" - SeaweedFS S3 localhost - "s3.us-east-1.lyvecloud.seagate.com" @@ -18940,15 +19208,33 @@ Properties: - "s3.ap-southeast-1.lyvecloud.seagate.com" - Seagate Lyve Cloud AP Southeast 1 (Singapore) - "s3.wasabisys.com" - - Wasabi US East endpoint + - Wasabi US East 1 (N. Virginia) + - "s3.us-east-2.wasabisys.com" + - Wasabi US East 2 (N. 
Virginia) + - "s3.us-central-1.wasabisys.com" + - Wasabi US Central 1 (Texas) - "s3.us-west-1.wasabisys.com" - - Wasabi US West endpoint + - Wasabi US West 1 (Oregon) + - "s3.ca-central-1.wasabisys.com" + - Wasabi CA Central 1 (Toronto) - "s3.eu-central-1.wasabisys.com" - - Wasabi EU Central endpoint + - Wasabi EU Central 1 (Amsterdam) + - "s3.eu-central-2.wasabisys.com" + - Wasabi EU Central 2 (Frankfurt) + - "s3.eu-west-1.wasabisys.com" + - Wasabi EU West 1 (London) + - "s3.eu-west-2.wasabisys.com" + - Wasabi EU West 2 (Paris) - "s3.ap-northeast-1.wasabisys.com" - Wasabi AP Northeast 1 (Tokyo) endpoint - "s3.ap-northeast-2.wasabisys.com" - Wasabi AP Northeast 2 (Osaka) endpoint + - "s3.ap-southeast-1.wasabisys.com" + - Wasabi AP Southeast 1 (Singapore) + - "s3.ap-southeast-2.wasabisys.com" + - Wasabi AP Southeast 2 (Sydney) + - "storage.iran.liara.space" + - Liara Iran endpoint - "s3.ir-thr-at1.arvanstorage.com" - ArvanCloud Tehran Iran (Asiatech) endpoint @@ -19281,7 +19567,7 @@ Properties: - Config: location_constraint - Env Var: RCLONE_S3_LOCATION_CONSTRAINT -- Provider: !AWS,Alibaba,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,ArvanCloud,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS +- Provider: !AWS,Alibaba,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Liara,ArvanCloud,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS - Type: string - Required: false @@ -19296,6 +19582,10 @@ For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview Note that this ACL is applied when server-side copying objects as S3 doesn't copy the ACL from the source but rather writes a fresh one. +If the acl is an empty string then no X-Amz-Acl: header is added and +the default (private) will be used. 
+ + Properties: - Config: acl @@ -19456,6 +19746,21 @@ Properties: #### --s3-storage-class +The storage class to use when storing new objects in Liara + +Properties: + +- Config: storage_class +- Env Var: RCLONE_S3_STORAGE_CLASS +- Provider: Liara +- Type: string +- Required: false +- Examples: + - "STANDARD" + - Standard storage class + +#### --s3-storage-class + The storage class to use when storing new objects in ArvanCloud. Properties: @@ -19534,7 +19839,7 @@ Properties: ### Advanced options -Here are the Advanced options specific to s3 (Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi). +Here are the Advanced options specific to s3 (Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi). #### --s3-bucket-acl @@ -19545,6 +19850,10 @@ For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview Note that this ACL is applied when only when creating buckets. If it isn't set then "acl" is used instead. +If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: +header is added and the default (private) will be used. + + Properties: - Config: bucket_acl @@ -20167,6 +20476,37 @@ Properties: - Type: bool - Default: false +#### --s3-might-gzip + +Set this if the backend might gzip objects. + +Normally providers will not alter objects when they are downloaded. If +an object was not uploaded with `Content-Encoding: gzip` then it won't +be set on download. + +However some providers may gzip objects even if they weren't uploaded +with `Content-Encoding: gzip` (eg Cloudflare). 
+ +A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + +If you set this flag and rclone downloads an object with +Content-Encoding: gzip set and chunked transfer encoding, then rclone +will decompress the object on the fly. + +If this is set to unset (the default) then rclone will choose +according to the provider setting what to apply, but you can override +rclone's choice here. + + +Properties: + +- Config: might_gzip +- Env Var: RCLONE_S3_MIGHT_GZIP +- Type: Tristate +- Default: unset + #### --s3-no-system-metadata Suppress setting and reading of system metadata @@ -20499,7 +20839,7 @@ Option Storage. Type of storage to configure. Choose a number from below, or type in your own value. ... -XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi +XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi \ (s3) ... Storage> s3 @@ -20668,7 +21008,7 @@ Option Storage. Type of storage to configure. Choose a number from below, or type in your own value. 
[snip] - 5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi + 5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi \ (s3) [snip] Storage> 5 @@ -20796,7 +21136,7 @@ Choose a number from below, or type in your own value \ "alias" 2 / Amazon Drive \ "amazon cloud drive" - 3 / Amazon S3 Complaint Storage Providers (Dreamhost, Ceph, ChinaMobile, ArvanCloud, Minio, IBM COS) + 3 / Amazon S3 Compliant Storage Providers (Dreamhost, Ceph, ChinaMobile, Liara, ArvanCloud, Minio, IBM COS) \ "s3" 4 / Backblaze B2 \ "b2" @@ -20963,7 +21303,7 @@ Option Storage. Type of storage to configure. Choose a number from below, or type in your own value. [snip] -XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi +XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi \ (s3) [snip] Storage> s3 @@ -21069,7 +21409,7 @@ Option Storage. Type of storage to configure. Choose a number from below, or type in your own value.
[snip] -XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi +XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi \ (s3) [snip] Storage> s3 @@ -21315,7 +21655,7 @@ Choose a number from below, or type in your own value \ (alias) 4 / Amazon Drive \ (amazon cloud drive) - 5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi + 5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi \ (s3) [snip] Storage> s3 @@ -21555,7 +21895,7 @@ Choose `s3` backend Type of storage to configure. Choose a number from below, or type in your own value. [snip] -XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, Lyve Cloud, Minio, RackCorp, SeaweedFS, and Tencent COS +XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Liara, Lyve Cloud, Minio, RackCorp, SeaweedFS, and Tencent COS \ (s3) [snip] Storage> s3 @@ -21742,7 +22082,7 @@ name> wasabi Type of storage to configure. 
Choose a number from below, or type in your own value [snip] -XX / Amazon S3 (also Dreamhost, Ceph, ChinaMobile, ArvanCloud, Minio) +XX / Amazon S3 (also Dreamhost, Ceph, ChinaMobile, ArvanCloud, Minio, Liara) \ "s3" [snip] Storage> s3 @@ -21856,7 +22196,7 @@ Type of storage to configure. Enter a string value. Press Enter for the default (""). Choose a number from below, or type in your own value [snip] - 4 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, Minio, and Tencent COS + 4 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Liara, Minio, and Tencent COS \ "s3" [snip] Storage> s3 @@ -21966,7 +22306,7 @@ Option Storage. Type of storage to configure. Choose a number from below, or type in your own value. ... - 5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, Lyve Cloud, Minio, RackCorp, SeaweedFS, and Tencent COS + 5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Lyve Cloud, Minio, RackCorp, SeaweedFS, and Tencent COS \ (s3) ... Storage> s3 @@ -22196,6 +22536,107 @@ d) Delete this remote y/e/d> y ``` +### Liara {#liara-cloud} + +Here is an example of making a [Liara Object Storage](https://liara.ir/landing/object-storage) +configuration. First run: + + rclone config + +This will guide you through an interactive setup process. + +``` +No remotes found, make a new one? +n) New remote +s) Set configuration password +n/s> n +name> Liara +Type of storage to configure. 
+Choose a number from below, or type in your own value +[snip] +XX / Amazon S3 (also Dreamhost, Ceph, ChinaMobile, ArvanCloud, Liara, Minio) + \ "s3" +[snip] +Storage> s3 +Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). Only applies if access_key_id and secret_access_key is blank. +Choose a number from below, or type in your own value + 1 / Enter AWS credentials in the next step + \ "false" + 2 / Get AWS credentials from the environment (env vars or IAM) + \ "true" +env_auth> 1 +AWS Access Key ID - leave blank for anonymous access or runtime credentials. +access_key_id> YOURACCESSKEY +AWS Secret Access Key (password) - leave blank for anonymous access or runtime credentials. +secret_access_key> YOURSECRETACCESSKEY +Region to connect to. +Choose a number from below, or type in your own value + / The default endpoint + 1 | US Region, Northern Virginia, or Pacific Northwest. + | Leave location constraint empty. + \ "us-east-1" +[snip] +region> +Endpoint for S3 API. +Leave blank if using Liara to use the default endpoint for the region. +Specify if using an S3 clone such as Ceph. +endpoint> storage.iran.liara.space +Canned ACL used when creating buckets and/or storing objects in S3. +For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl +Choose a number from below, or type in your own value + 1 / Owner gets FULL_CONTROL. No one else has access rights (default). + \ "private" +[snip] +acl> +The server-side encryption algorithm used when storing this object in S3. +Choose a number from below, or type in your own value + 1 / None + \ "" + 2 / AES256 + \ "AES256" +server_side_encryption> +The storage class to use when storing objects in S3. 
+Choose a number from below, or type in your own value + 1 / Default + \ "" + 2 / Standard storage class + \ "STANDARD" +storage_class> +Remote config +-------------------- +[Liara] +env_auth = false +access_key_id = YOURACCESSKEY +secret_access_key = YOURSECRETACCESSKEY +endpoint = storage.iran.liara.space +location_constraint = +acl = +server_side_encryption = +storage_class = +-------------------- +y) Yes this is OK +e) Edit this remote +d) Delete this remote +y/e/d> y +``` + +This will leave the config file looking like this. + +``` +[Liara] +type = s3 +provider = Liara +env_auth = false +access_key_id = YOURACCESSKEY +secret_access_key = YOURSECRETACCESSKEY +region = +endpoint = storage.iran.liara.space +location_constraint = +acl = +server_side_encryption = +storage_class = +``` + ### ArvanCloud {#arvan-cloud} [ArvanCloud](https://www.arvancloud.com/en/products/cloud-storage) ArvanCloud Object Storage goes beyond the limited traditional file storage. @@ -22214,7 +22655,7 @@ name> ArvanCloud Type of storage to configure. Choose a number from below, or type in your own value [snip] -XX / Amazon S3 (also Dreamhost, Ceph, ChinaMobile, ArvanCloud, Minio) +XX / Amazon S3 (also Dreamhost, Ceph, ChinaMobile, ArvanCloud, Liara, Minio) \ "s3" [snip] Storage> s3 @@ -22337,7 +22778,7 @@ Choose a number from below, or type in your own value \ "alias" 3 / Amazon Drive \ "amazon cloud drive" - 4 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, Minio, and Tencent COS + 4 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Liara, Minio, and Tencent COS \ "s3" [snip] Storage> s3 @@ -23184,9 +23625,10 @@ Choose a number from below, or type in your own value \ "enterprise" box_sub_type> Remote config -Use auto config? 
- * Say Y if not sure - * Say N if you are working on a remote or headless machine +Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access +If not sure try Y. If Y failed, try N. y) Yes n) No y/n> y @@ -23303,9 +23745,10 @@ Already have a token - refresh? y) Yes n) No y/n> y -Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine +Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access +If not sure try Y. If Y failed, try N. y) Yes n) No y/n> y @@ -23589,6 +24032,8 @@ Reverse Solidus). Box only supports filenames up to 255 characters in length. +Box has [API rate limits](https://developer.box.com/guides/api-calls/permissions-and-errors/rate-limits/) that sometimes reduce the speed of rclone. + `rclone about` is not supported by the Box backend. Backends without this capability cannot determine free space for an rclone mount or use policy `mfs` (most free space) as a member of an rclone union @@ -23596,7 +24041,7 @@ remote. See [List of backends that do not support rclone about](https://rclone.org/overview/#optional-features) and [rclone about](https://rclone.org/commands/rclone_about/) -# Cache (DEPRECATED) +# Cache The `cache` remote wraps another existing remote and stores file structure and its data for long running tasks like `rclone mount`. @@ -24279,7 +24724,7 @@ Print stats on the cache backend in JSON format. -# Chunker (BETA) +# Chunker The `chunker` overlay transparently splits large files into smaller chunks during upload to wrapped remote and transparently assembles them back @@ -24798,9 +25243,10 @@ y) Yes n) No y/n> n Remote config -Use auto config? 
- * Say Y if not sure - * Say N if you are working on a remote or headless machine +Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access +If not sure try Y. If Y failed, try N. y) Yes n) No y/n> y @@ -24995,7 +25441,7 @@ remote. See [List of backends that do not support rclone about](https://rclone.org/overview/#optional-features) and [rclone about](https://rclone.org/commands/rclone_about/) -# Crypt +# Crypt Rclone `crypt` remotes encrypt and decrypt other remotes. @@ -25761,7 +26207,7 @@ a salt. * [rclone cryptdecode](https://rclone.org/commands/rclone_cryptdecode/) - Show forward/reverse mapping of encrypted filenames -# Compress (Experimental) +# Compress ## Warning @@ -26132,6 +26578,16 @@ d) Delete this remote y/e/d> y ``` +See the [remote setup docs](https://rclone.org/remote_setup/) for how to set it up on a +machine with no Internet browser available. + +Note that rclone runs a webserver on your local machine to collect the +token as returned from Dropbox. This only +runs from the moment it opens your browser to the moment you get back +the verification code. This is on `http://127.0.0.1:53682/` and it +may require you to unblock it temporarily if you are running a host +firewall, or use manual mode. + You can then use it like this, List directories in top level of your dropbox @@ -27046,7 +27502,7 @@ Use Implicit FTPS (FTP over TLS). When using implicit FTP over TLS the client connects using TLS right from the start which breaks compatibility with non-TLS-aware servers. This is usually served over port 990 rather -than port 21. Cannot be used in combination with explicit FTP. +than port 21. Cannot be used in combination with explicit FTPS. Properties: @@ -27061,7 +27517,7 @@ Use Explicit FTPS (FTP over TLS). 
When using explicit FTP over TLS the client explicitly requests security from the server in order to upgrade a plain text connection -to an encrypted one. Cannot be used in combination with implicit FTP. +to an encrypted one. Cannot be used in combination with implicit FTPS. Properties: @@ -27430,9 +27886,10 @@ Choose a number from below, or type in your own value \ "DURABLE_REDUCED_AVAILABILITY" storage_class> 5 Remote config -Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine or Y didn't work +Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access +If not sure try Y. If Y failed, try N. y) Yes n) No y/n> y @@ -27456,8 +27913,12 @@ d) Delete this remote y/e/d> y ``` +See the [remote setup docs](https://rclone.org/remote_setup/) for how to set it up on a +machine with no Internet browser available. + Note that rclone runs a webserver on your local machine to collect the -token as returned from Google if you use auto config mode. This only +token as returned from Google if using web browser to automatically +authenticate. This only runs from the moment it opens your browser to the moment you get back the verification code. This is on `http://127.0.0.1:53682/` and this it may require you to unblock it temporarily if you are running a host @@ -28030,9 +28491,10 @@ scope> 1 Service Account Credentials JSON file path - needed only if you want use SA instead of interactive login. service_account_file> Remote config -Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine or Y didn't work +Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access +If not sure try Y. 
If Y failed, try N. y) Yes n) No y/n> y @@ -28059,8 +28521,12 @@ d) Delete this remote y/e/d> y ``` +See the [remote setup docs](https://rclone.org/remote_setup/) for how to set it up on a +machine with no Internet browser available. + Note that rclone runs a webserver on your local machine to collect the -token as returned from Google if you use auto config mode. This only +token as returned from Google if using web browser to automatically +authenticate. This only runs from the moment it opens your browser to the moment you get back the verification code. This is on `http://127.0.0.1:53682/` and it may require you to unblock it temporarily if you are running a host @@ -29572,9 +30038,10 @@ y) Yes n) No y/n> n Remote config -Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine +Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access +If not sure try Y. If Y failed, try N. y) Yes n) No y/n> y @@ -29598,8 +30065,12 @@ d) Delete this remote y/e/d> y ``` +See the [remote setup docs](https://rclone.org/remote_setup/) for how to set it up on a +machine with no Internet browser available. + Note that rclone runs a webserver on your local machine to collect the -token as returned from Google if you use auto config mode. This only +token as returned from Google if using web browser to automatically +authenticate. This only runs from the moment it opens your browser to the moment you get back the verification code. This is on `http://127.0.0.1:53682/` and this may require you to unblock it temporarily if you are running a host @@ -29978,7 +30449,7 @@ Rclone cannot delete files anywhere except under `album`. The Google Photos API does not support deleting albums - see [bug #135714733](https://issuetracker.google.com/issues/135714733). 
-# Hasher (EXPERIMENTAL) +# Hasher Hasher is a special overlay backend to create remotes which handle checksums for other remotes. It's main functions include: @@ -30594,7 +31065,10 @@ Leave blank normally. scope_access> Edit advanced config? y/n> n -Use auto config? +Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access +If not sure try Y. If Y failed, try N. y/n> y If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth?state=xxxxxxxxxxxxxxxxxxxxxx Log in and authorize rclone for access @@ -31272,6 +31746,22 @@ It can be triggered when you did a server-side copy. Reading metadata will also provide custom (non-standard nor reserved) ones. +## Filtering auto generated files + +The Internet Archive automatically creates metadata files after +upload. These can cause problems when doing an `rclone sync` as rclone +will try, and fail, to delete them. These metadata files are not +changeable, as they are created by the Internet Archive automatically. + +These auto-created files can be excluded from the sync using [metadata +filtering](https://rclone.org/filtering/#metadata). + + rclone sync ... --metadata-exclude "source=metadata" --metadata-exclude "format=Metadata" + +Which excludes from the sync any files which have the +`source=metadata` or `format=Metadata` flags which are added to +Internet Archive auto-created files. + ## Configuration Here is an example of making an internetarchive configuration. @@ -32271,8 +32761,21 @@ Currently it is recommended to disable 2FA on Mail.ru accounts intended for rclo ## Configuration -Here is an example of making a mailru configuration. First create a Mail.ru Cloud -account and choose a tariff, then run +Here is an example of making a mailru configuration. + +First create a Mail.ru Cloud account and choose a tariff. 
+ +You will need to log in and create an app password for rclone. Rclone +**will not work** with your normal username and password - it will +give an error like `oauth2: server response missing access_token`. + +- Click on your user icon in the top right +- Go to Security / "Пароль и безопасность" +- Click password for apps / "Пароли для внешних приложений" +- Add the password - give it a name - eg "rclone" +- Copy the password and use this password below - your normal login password won't work. + +Now run rclone config @@ -32298,6 +32801,10 @@ User name (usually email) Enter a string value. Press Enter for the default (""). user> username@mail.ru Password + +This must be an app password - rclone will not work with your normal +password. See the Configuration section in the docs for how to make an +app password. y) Yes type in my own password g) Generate random password y/g> y @@ -32420,6 +32927,11 @@ Properties: Password. +This must be an app password - rclone will not work with your normal +password. See the Configuration section in the docs for how to make an +app password. + + **NB** Input to this must be obscured - see [rclone obscure](https://rclone.org/commands/rclone_obscure/). Properties: @@ -33274,7 +33786,13 @@ docs](https://rclone.org/docs/#fast-list) for more details. The modified time is stored as metadata on the object with the `mtime` key. It is stored using RFC3339 Format time with nanosecond precision. The metadata is supplied during directory listings so -there is no overhead to using it. +there is no performance overhead to using it. + +If you wish to use the Azure standard `LastModified` time stored on +the object as the modified time, then use the `--use-server-modtime` +flag. Note that rclone can't set `LastModified`, so using the +`--update` flag when syncing is recommended if using +`--use-server-modtime`. ### Performance @@ -33310,11 +33828,80 @@ MD5 hashes are stored with blobs. 
However blobs that were uploaded in chunks only have an MD5 if the source remote was capable of MD5 hashes, e.g. the local disk. -### Authenticating with Azure Blob Storage +### Authentication {#authentication} -Rclone has 3 ways of authenticating with Azure Blob Storage: +There are a number of ways of supplying credentials for Azure Blob +Storage. Rclone tries them in the order of the sections below. -#### Account and Key +#### Env Auth + +If the `env_auth` config parameter is `true` then rclone will pull +credentials from the environment or runtime. + +It tries these authentication methods in this order: + +1. Environment Variables +2. Managed Service Identity Credentials +3. Azure CLI credentials (as used by the az tool) + +These are described in the following sections + +##### Env Auth: 1. Environment Variables + +If `env_auth` is set and environment variables are present rclone +authenticates a service principal with a secret or certificate, or a +user with a password, depending on which environment variable are set. +It reads configuration from these variables, in the following order: + +1. Service principal with client secret + - `AZURE_TENANT_ID`: ID of the service principal's tenant. Also called its "directory" ID. + - `AZURE_CLIENT_ID`: the service principal's client ID + - `AZURE_CLIENT_SECRET`: one of the service principal's client secrets +2. Service principal with certificate + - `AZURE_TENANT_ID`: ID of the service principal's tenant. Also called its "directory" ID. + - `AZURE_CLIENT_ID`: the service principal's client ID + - `AZURE_CLIENT_CERTIFICATE_PATH`: path to a PEM or PKCS12 certificate file including the private key. + - `AZURE_CLIENT_CERTIFICATE_PASSWORD`: (optional) password for the certificate file. + - `AZURE_CLIENT_SEND_CERTIFICATE_CHAIN`: (optional) Specifies whether an authentication request will include an x5c header to support subject name / issuer based authentication. 
When set to "true" or "1", authentication requests include the x5c header. +3. User with username and password + - `AZURE_TENANT_ID`: (optional) tenant to authenticate in. Defaults to "organizations". + - `AZURE_CLIENT_ID`: client ID of the application the user will authenticate to + - `AZURE_USERNAME`: a username (usually an email address) + - `AZURE_PASSWORD`: the user's password + +##### Env Auth: 2. Managed Service Identity Credentials + +When using Managed Service Identity if the VM(SS) on which this +program is running has a system-assigned identity, it will be used by +default. If the resource has no system-assigned but exactly one +user-assigned identity, the user-assigned identity will be used by +default. + +If the resource has multiple user-assigned identities you will need to +unset `env_auth` and set `use_msi` instead. See the [`use_msi` +section](#use_msi). + +##### Env Auth: 3. Azure CLI credentials (as used by the az tool) + +Credentials created with the `az` tool can be picked up using `env_auth`. + +For example if you were to login with a service principal like this: + + az login --service-principal -u XXX -p XXX --tenant XXX + +Then you could access rclone resources like this: + + rclone lsf :azureblob,env_auth,account=ACCOUNT:CONTAINER + +Or + + rclone lsf --azureblob-env-auth --azureblob-acccount=ACCOUNT :azureblob:CONTAINER + +Which is analogous to using the `az` tool: + + az storage blob list --container-name CONTAINER --account-name ACCOUNT --auth-mode login + +#### Account and Shared Key This is the most straight forward and least flexible way. Just fill in the `account` and `key` lines and leave the rest blank. @@ -33323,7 +33910,7 @@ in the `account` and `key` lines and leave the rest blank. This can be an account level SAS URL or container level SAS URL. -To use it leave `account`, `key` blank and fill in `sas_url`. +To use it leave `account` and `key` blank and fill in `sas_url`. 
An account level SAS URL or container level SAS URL can be obtained from the Azure portal or the Azure Storage Explorer. To get a @@ -33350,6 +33937,60 @@ Container level SAS URLs are useful for temporarily allowing third parties access to a single container or putting credentials into an untrusted environment such as a CI build server. +#### Service principal with client secret + +If these variables are set, rclone will authenticate with a service principal with a client secret. + +- `tenant`: ID of the service principal's tenant. Also called its "directory" ID. +- `client_id`: the service principal's client ID +- `client_secret`: one of the service principal's client secrets + +The credentials can also be placed in a file using the +`service_principal_file` configuration option. + +#### Service principal with certificate + +If these variables are set, rclone will authenticate with a service principal with certificate. + +- `tenant`: ID of the service principal's tenant. Also called its "directory" ID. +- `client_id`: the service principal's client ID +- `client_certificate_path`: path to a PEM or PKCS12 certificate file including the private key. +- `client_certificate_password`: (optional) password for the certificate file. +- `client_send_certificate_chain`: (optional) Specifies whether an authentication request will include an x5c header to support subject name / issuer based authentication. When set to "true" or "1", authentication requests include the x5c header. + +**NB** `client_certificate_password` must be obscured - see [rclone obscure](https://rclone.org/commands/rclone_obscure/). + +#### User with username and password + +If these variables are set, rclone will authenticate with username and password. + +- `tenant`: (optional) tenant to authenticate in. Defaults to "organizations". 
+- `client_id`: client ID of the application the user will authenticate to +- `username`: a username (usually an email address) +- `password`: the user's password + +Microsoft doesn't recommend this kind of authentication, because it's +less secure than other authentication flows. This method is not +interactive, so it isn't compatible with any form of multi-factor +authentication, and the application must already have user or admin +consent. This credential can only authenticate work and school +accounts; it can't authenticate Microsoft accounts. + +**NB** `password` must be obscured - see [rclone obscure](https://rclone.org/commands/rclone_obscure/). + +#### Managed Service Identity Credentials {#use_msi} + +If `use_msi` is set then managed service identity credentials are +used. This authentication only works when running in an Azure service. +`env_auth` needs to be unset to use this. + +However if you have multiple user identities to choose from these must +be explicitly specified using exactly one of the `msi_object_id`, +`msi_client_id`, or `msi_mi_res_id` parameters. + +If none of `msi_object_id`, `msi_client_id`, or `msi_mi_res_id` is +set, this is is equivalent to using `env_auth`. + ### Standard options @@ -33357,9 +33998,15 @@ Here are the Standard options specific to azureblob (Microsoft Azure Blob Storag #### --azureblob-account -Storage Account Name. +Azure Storage Account Name. + +Set this to the Azure Storage Account Name in use. + +Leave blank to use SAS URL or Emulator, otherwise it needs to be set. + +If this is blank and if env_auth is set it will be read from the +environment variable `AZURE_STORAGE_ACCOUNT_NAME` if possible. -Leave blank to use SAS URL or Emulator. Properties: @@ -33368,30 +34015,22 @@ Properties: - Type: string - Required: false -#### --azureblob-service-principal-file +#### --azureblob-env-auth -Path to file containing credentials for use with a service principal. - -Leave blank normally. 
Needed only if you want to use a service principal instead of interactive login. - - $ az ad sp create-for-rbac --name "" \ - --role "Storage Blob Data Owner" \ - --scopes "/subscriptions//resourceGroups//providers/Microsoft.Storage/storageAccounts//blobServices/default/containers/" \ - > azure-principal.json - -See ["Create an Azure service principal"](https://docs.microsoft.com/en-us/cli/azure/create-an-azure-service-principal-azure-cli) and ["Assign an Azure role for access to blob data"](https://docs.microsoft.com/en-us/azure/storage/common/storage-auth-aad-rbac-cli) pages for more details. +Read credentials from runtime (environment variables, CLI or MSI). +See the [authentication docs](/azureblob#authentication) for full info. Properties: -- Config: service_principal_file -- Env Var: RCLONE_AZUREBLOB_SERVICE_PRINCIPAL_FILE -- Type: string -- Required: false +- Config: env_auth +- Env Var: RCLONE_AZUREBLOB_ENV_AUTH +- Type: bool +- Default: false #### --azureblob-key -Storage Account Key. +Storage Account Shared Key. Leave blank to use SAS URL or Emulator. @@ -33415,6 +34054,169 @@ Properties: - Type: string - Required: false +#### --azureblob-tenant + +ID of the service principal's tenant. Also called its directory ID. + +Set this if using +- Service principal with client secret +- Service principal with certificate +- User with username and password + + +Properties: + +- Config: tenant +- Env Var: RCLONE_AZUREBLOB_TENANT +- Type: string +- Required: false + +#### --azureblob-client-id + +The ID of the client in use. 
+ +Set this if using +- Service principal with client secret +- Service principal with certificate +- User with username and password + + +Properties: + +- Config: client_id +- Env Var: RCLONE_AZUREBLOB_CLIENT_ID +- Type: string +- Required: false + +#### --azureblob-client-secret + +One of the service principal's client secrets + +Set this if using +- Service principal with client secret + + +Properties: + +- Config: client_secret +- Env Var: RCLONE_AZUREBLOB_CLIENT_SECRET +- Type: string +- Required: false + +#### --azureblob-client-certificate-path + +Path to a PEM or PKCS12 certificate file including the private key. + +Set this if using +- Service principal with certificate + + +Properties: + +- Config: client_certificate_path +- Env Var: RCLONE_AZUREBLOB_CLIENT_CERTIFICATE_PATH +- Type: string +- Required: false + +#### --azureblob-client-certificate-password + +Password for the certificate file (optional). + +Optionally set this if using +- Service principal with certificate + +And the certificate has a password. + + +**NB** Input to this must be obscured - see [rclone obscure](https://rclone.org/commands/rclone_obscure/). + +Properties: + +- Config: client_certificate_password +- Env Var: RCLONE_AZUREBLOB_CLIENT_CERTIFICATE_PASSWORD +- Type: string +- Required: false + +### Advanced options + +Here are the Advanced options specific to azureblob (Microsoft Azure Blob Storage). + +#### --azureblob-client-send-certificate-chain + +Send the certificate chain when using certificate auth. + +Specifies whether an authentication request will include an x5c header +to support subject name / issuer based authentication. When set to +true, authentication requests include the x5c header. 
+ +Optionally set this if using +- Service principal with certificate + + +Properties: + +- Config: client_send_certificate_chain +- Env Var: RCLONE_AZUREBLOB_CLIENT_SEND_CERTIFICATE_CHAIN +- Type: bool +- Default: false + +#### --azureblob-username + +User name (usually an email address) + +Set this if using +- User with username and password + + +Properties: + +- Config: username +- Env Var: RCLONE_AZUREBLOB_USERNAME +- Type: string +- Required: false + +#### --azureblob-password + +The user's password + +Set this if using +- User with username and password + + +**NB** Input to this must be obscured - see [rclone obscure](https://rclone.org/commands/rclone_obscure/). + +Properties: + +- Config: password +- Env Var: RCLONE_AZUREBLOB_PASSWORD +- Type: string +- Required: false + +#### --azureblob-service-principal-file + +Path to file containing credentials for use with a service principal. + +Leave blank normally. Needed only if you want to use a service principal instead of interactive login. + + $ az ad sp create-for-rbac --name "" \ + --role "Storage Blob Data Owner" \ + --scopes "/subscriptions//resourceGroups//providers/Microsoft.Storage/storageAccounts//blobServices/default/containers/" \ + > azure-principal.json + +See ["Create an Azure service principal"](https://docs.microsoft.com/en-us/cli/azure/create-an-azure-service-principal-azure-cli) and ["Assign an Azure role for access to blob data"](https://docs.microsoft.com/en-us/azure/storage/common/storage-auth-aad-rbac-cli) pages for more details. + +It may be more convenient to put the credentials directly into the +rclone config file under the `client_id`, `tenant` and `client_secret` +keys instead of setting `service_principal_file`. + + +Properties: + +- Config: service_principal_file +- Env Var: RCLONE_AZUREBLOB_SERVICE_PRINCIPAL_FILE +- Type: string +- Required: false + #### --azureblob-use-msi Use a managed service identity to authenticate (only works in Azure). 
@@ -33435,23 +34237,6 @@ Properties: - Type: bool - Default: false -#### --azureblob-use-emulator - -Uses local storage emulator if provided as 'true'. - -Leave blank if using real azure storage endpoint. - -Properties: - -- Config: use_emulator -- Env Var: RCLONE_AZUREBLOB_USE_EMULATOR -- Type: bool -- Default: false - -### Advanced options - -Here are the Advanced options specific to azureblob (Microsoft Azure Blob Storage). - #### --azureblob-msi-object-id Object ID of the user-assigned MSI to use, if any. @@ -33491,6 +34276,19 @@ Properties: - Type: string - Required: false +#### --azureblob-use-emulator + +Uses local storage emulator if provided as 'true'. + +Leave blank if using real azure storage endpoint. + +Properties: + +- Config: use_emulator +- Env Var: RCLONE_AZUREBLOB_USE_EMULATOR +- Type: bool +- Default: false + #### --azureblob-endpoint Endpoint for the service. @@ -33694,6 +34492,21 @@ Properties: - "container" - Allow full public read access for container and blob data. +#### --azureblob-no-check-container + +If set, don't attempt to check the container exists or create it. + +This can be useful when trying to minimise the number of transactions +rclone does if you know the container exists already. + + +Properties: + +- Config: no_check_container +- Env Var: RCLONE_AZUREBLOB_NO_CHECK_CONTAINER +- Type: bool +- Default: false + #### --azureblob-no-head-object If set, do not do HEAD before GET when getting objects. @@ -33707,6 +34520,18 @@ Properties: +### Custom upload headers + +You can set custom upload headers with the `--header-upload` flag. 
+ +- Cache-Control +- Content-Disposition +- Content-Encoding +- Content-Language +- Content-Type + +Eg `--header-upload "Content-Type: text/potato"` + ## Limitations MD5 sums are only uploaded with chunked files if the source has an MD5 @@ -33721,11 +34546,20 @@ See [List of backends that do not support rclone about](https://rclone.org/overv ## Azure Storage Emulator Support -You can run rclone with storage emulator (usually _azurite_). +You can run rclone with the storage emulator (usually _azurite_). -To do this, just set up a new remote with `rclone config` following instructions described in introduction and set `use_emulator` config as `true`. You do not need to provide default account name neither an account key. +To do this, just set up a new remote with `rclone config` following +the instructions in the introduction and set `use_emulator` in the +advanced settings as `true`. You do not need to provide a default +account name nor an account key. But you can override them in the +`account` and `key` options. (Prior to v1.61 they were hard coded to +_azurite_'s `devstoreaccount1`.) -Also, if you want to access a storage emulator instance running on a different machine, you can override _Endpoint_ parameter in advanced settings, setting it to `http(s)://:/devstoreaccount1` (e.g. `http://10.254.2.5:10000/devstoreaccount1`). +Also, if you want to access a storage emulator instance running on a +different machine, you can override the `endpoint` parameter in the +advanced settings, setting it to +`http(s)://:/devstoreaccount1` +(e.g. `http://10.254.2.5:10000/devstoreaccount1`). # Microsoft OneDrive @@ -33776,9 +34610,10 @@ y) Yes n) No y/n> n Remote config -Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine +Use web browser to automatically authenticate rclone with remote? 
+ * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access +If not sure try Y. If Y failed, try N. y) Yes n) No y/n> y @@ -34437,6 +35272,31 @@ permissions as an admin, take a look at the docs: [1](https://docs.microsoft.com/en-us/sharepoint/turn-external-sharing-on-or-off), [2](https://support.microsoft.com/en-us/office/set-up-and-manage-access-requests-94b26e0b-2822-49d4-929a-8455698654b3). +### Can not access `Shared` with me files + +Shared with me files is not supported by rclone [currently](https://github.com/rclone/rclone/issues/4062), but there is a workaround: + +1. Visit [https://onedrive.live.com](https://onedrive.live.com/) +2. Right click a item in `Shared`, then click `Add shortcut to My files` in the context +
    + Screenshot (Shared with me) + + ![make_shortcut](https://user-images.githubusercontent.com/60313789/206118040-7e762b3b-aa61-41a1-8649-cc18889f3572.png) +
    + +3. The shortcut will appear in `My files`, you can access it with rclone, it behaves like a normal folder/file. +
    + Screenshot (My Files) + + ![in_my_files](https://i.imgur.com/0S8H3li.png) +
    + +
    + Screenshot (rclone mount) + + ![rclone_mount](https://i.imgur.com/2Iq66sW.png) +
    + # OpenDrive Paths are specified as `remote:path` @@ -35661,7 +36521,7 @@ Properties: signs in file names. rclone will transparently [encode](https://rclone.org/overview/#encoding) them for you, but you'd better be aware -# Swift +# Swift Swift refers to [OpenStack Object Storage](https://docs.openstack.org/swift/latest/). Commercial implementations of that being: @@ -36316,9 +37176,10 @@ client_id> Pcloud App Client Secret - leave blank normally. client_secret> Remote config -Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine +Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access +If not sure try Y. If Y failed, try N. y) Yes n) No y/n> y @@ -36600,9 +37461,10 @@ Storage> premiumizeme ** See help for premiumizeme backend at: https://rclone.org/premiumizeme/ ** Remote config -Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine +Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access +If not sure try Y. If Y failed, try N. y) Yes n) No y/n> y @@ -36749,9 +37611,10 @@ Storage> putio ** See help for putio backend at: https://rclone.org/putio/ ** Remote config -Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine +Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access +If not sure try Y. If Y failed, try N. 
y) Yes n) No y/n> y @@ -36784,8 +37647,12 @@ q) Quit config e/n/d/r/c/s/q> q ``` +See the [remote setup docs](https://rclone.org/remote_setup/) for how to set it up on a +machine with no Internet browser available. + Note that rclone runs a webserver on your local machine to collect the -token as returned from Google if you use auto config mode. This only +token as returned from put.io if using web browser to automatically +authenticate. This only runs from the moment it opens your browser to the moment you get back the verification code. This is on `http://127.0.0.1:53682/` and this it may require you to unblock it temporarily if you are running a host @@ -36846,7 +37713,7 @@ If you want to avoid ever hitting these limits, you may use the `--tpslimit` flag with a low number. Note that the imposed limits may be different for different operations, and may change over time. -# Seafile +# Seafile This is a backend for the [Seafile](https://www.seafile.com/) storage service: - It works with both the free community edition or the professional edition. @@ -37753,6 +38620,9 @@ This enables the use of the following insecure ciphers and key exchange methods: Those algorithms are insecure and may allow plaintext data to be recovered by an attacker. +This must be false if you use either ciphers or key_exchange advanced options. + + Properties: - Config: use_insecure_cipher @@ -38085,6 +38955,64 @@ Properties: - Type: SpaceSepList - Default: +#### --sftp-ciphers + +Space separated list of ciphers to be used for session encryption, ordered by preference. + +At least one must match with server configuration. This can be checked for example using ssh -Q cipher. + +This must not be set if use_insecure_cipher is true. 
+ +Example: + + aes128-ctr aes192-ctr aes256-ctr aes128-gcm@openssh.com aes256-gcm@openssh.com + + +Properties: + +- Config: ciphers +- Env Var: RCLONE_SFTP_CIPHERS +- Type: SpaceSepList +- Default: + +#### --sftp-key-exchange + +Space separated list of key exchange algorithms, ordered by preference. + +At least one must match with server configuration. This can be checked for example using ssh -Q kex. + +This must not be set if use_insecure_cipher is true. + +Example: + + sntrup761x25519-sha512@openssh.com curve25519-sha256 curve25519-sha256@libssh.org ecdh-sha2-nistp256 + + +Properties: + +- Config: key_exchange +- Env Var: RCLONE_SFTP_KEY_EXCHANGE +- Type: SpaceSepList +- Default: + +#### --sftp-macs + +Space separated list of MACs (message authentication code) algorithms, ordered by preference. + +At least one must match with server configuration. This can be checked for example using ssh -Q mac. + +Example: + + umac-64-etm@openssh.com umac-128-etm@openssh.com hmac-sha2-256-etm@openssh.com + + +Properties: + +- Config: macs +- Env Var: RCLONE_SFTP_MACS +- Type: SpaceSepList +- Default: + ## Limitations @@ -39860,7 +40788,7 @@ vendor = other bearer_token_command = oidc-token XDC ``` -# Yandex Disk +# Yandex Disk [Yandex Disk](https://disk.yandex.com) is a cloud storage solution created by [Yandex](https://yandex.com). @@ -39890,9 +40818,10 @@ client_id> Yandex Client Secret - leave blank normally. client_secret> Remote config -Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine +Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access +If not sure try Y. If Y failed, try N. 
y) Yes n) No y/n> y @@ -40087,7 +41016,7 @@ Token generation will work without a mail account, but Rclone won't be able to c [403 - DiskUnsupportedUserAccountTypeError] User account type is not supported. ``` -# Zoho Workdrive +# Zoho Workdrive [Zoho WorkDrive](https://www.zoho.com/workdrive/) is a cloud storage solution created by [Zoho](https://zoho.com). @@ -40128,9 +41057,10 @@ y) Yes n) No (default) y/n> n Remote config -Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine +Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access +If not sure try Y. If Y failed, try N. y) Yes (default) n) No y/n> @@ -40760,8 +41690,8 @@ Properties: Don't check to see if the files change during upload. Normally rclone checks the size and modification time of files as they -are being uploaded and aborts with a message which starts "can't copy -- source file is being updated" if the file changes during upload. +are being uploaded and aborts with a message which starts "can't copy - +source file is being updated" if the file changes during upload. However on some file systems this modification time check may fail (e.g. 
[Glusterfs #2206](https://github.com/rclone/rclone/issues/2206)) so this @@ -40954,6 +41884,135 @@ Options: # Changelog +## v1.61.0 - 2022-12-20 + +[See commits](https://github.com/rclone/rclone/compare/v1.60.0...v1.61.0) + +* New backends + * New S3 providers + * [Liara LOS](https://rclone.org/s3/#liara-cloud) (MohammadReza) +* New Features + * build: Add vulnerability testing using govulncheck (albertony) + * cmd: Enable `SIGINFO` (Ctrl-T) handler on FreeBSD, NetBSD, OpenBSD and Dragonfly BSD (x3-apptech) + * config: Add [config/setpath](https://rclone.org/rc/#config-setpath) for setting config path via rc/librclone (Nick Craig-Wood) + * dedupe + * Count Checks in the stats while scanning for duplicates (Nick Craig-Wood) + * Make dedupe obey the filters (Nick Craig-Wood) + * dlna: Properly attribute code used from https://github.com/anacrolix/dms (Nick Craig-Wood) + * docs + * Add minimum versions and status badges to backend and command docs (Nick Craig-Wood, albertony) + * Remote names may not start or end with space (albertony) + * filter: Add metadata filters [--metadata-include/exclude/filter](https://rclone.org/filtering/#metadata) and friends (Nick Craig-Wood) + * fs + * Make all duration flags take `y`, `M`, `w`, `d` etc suffixes (Nick Craig-Wood) + * Add global flag `--color` to control terminal colors (Kevin Verstaen) + * fspath: Allow unicode numbers and letters in remote names (albertony) + * lib/file: Improve error message for creating dir on non-existent network host on windows (albertony) + * lib/http: Finish port of rclone servers to `lib/http` (Tom Mombourquette, Nick Craig-Wood) + * lib/oauthutil: Improved usability of config flows needing web browser (Ole Frost) + * ncdu + * Add support for modification time (albertony) + * Fallback to sort by name also for sort by average size (albertony) + * Rework to use tcell directly instead of the termbox wrapper (eNV25) + * rc: Add commands to set [GC Percent](https://rclone.org/rc/#debug-set-gc-percent) 
& [Memory Limit](/rc/#debug-set-soft-memory-limit) (go 1.19+) (Anagh Kumar Baranwal) + * rcat: Preserve metadata when Copy falls back to Rcat (Nick Craig-Wood) + * rcd: Refactor rclone rc server to use `lib/http` (Nick Craig-Wood) + * rcserver: Avoid generating default credentials with htpasswd (Kamui) + * restic: Refactor to use `lib/http` (Nolan Woods) + * serve http: Support unix sockets and multiple listeners (Tom Mombourquette) + * serve webdav: Refactor to use `lib/http` (Nick Craig-Wood) + * test: Replace defer cleanup with `t.Cleanup` (Eng Zer Jun) + * test memory: Read metadata if `-M` flag is specified (Nick Craig-Wood) + * wasm: Comply with `wasm_exec.js` licence terms (Matthew Vernon) +* Bug Fixes + * build: Update `golang.org/x/net/http2` to fix GO-2022-1144 (Nick Craig-Wood) + * restic: Fix typo in docs 'remove' should be 'remote' (asdffdsazqqq) + * serve dlna: Fix panic: Logger uninitialized. (Nick Craig-Wood) +* Mount + * Update cgofuse for FUSE-T support for mounting volumes on Mac (Nick Craig-Wood) +* VFS + * Windows: fix slow opening of exe files by not truncating files when not necessary (Nick Craig-Wood) + * Fix IO Error opening a file with `O_CREATE|O_RDONLY` in `--vfs-cache-mode` not full (Nick Craig-Wood) +* Crypt + * Fix compress wrapping crypt giving upload errors (Nick Craig-Wood) +* Azure Blob + * Port to new SDK (Nick Craig-Wood) + * Revamp authentication to include all methods and docs (Nick Craig-Wood) + * Port old authentication methods to new SDK (Nick Craig-Wood, Brad Ackerman) + * Thanks to [Stonebranch](https://www.stonebranch.com/) for sponsoring this work. 
+ * Add `--azureblob-no-check-container` to assume container exists (Nick Craig-Wood) + * Add `--use-server-modtime` support (Abdullah Saglam) + * Add support for custom upload headers (rkettelerij) + * Allow emulator account/key override (Roel Arents) + * Support simple "environment credentials" (Nathaniel Wesley Filardo) + * Ignore `AuthorizationFailure` when trying to create a container (Nick Craig-Wood) +* Box + * Added note on Box API rate limits (Ole Frost) +* Drive + * Handle shared drives with leading/trailing space in name (related to) (albertony) +* FTP + * Update help text of implicit/explicit TLS options to refer to FTPS instead of FTP (ycdtosa) + * Improve performance to speed up `--files-from` and `NewObject` (Anthony Pessy) +* HTTP + * Parse GET responses when `no_head` is set (Arnie97) + * Do not update object size based on `Range` requests (Arnie97) + * Support `Content-Range` response header (Arnie97) +* Onedrive + * Document workaround for shared with me files (vanplus) +* S3 + * Add Liara LOS to provider list (MohammadReza) + * Add DigitalOcean Spaces regions `sfo3`, `fra1`, `syd1` (Jack) + * Avoid privileged `GetBucketLocation` to resolve s3 region (Anthony Pessy) + * Stop setting object and bucket ACL to `private` if it is an empty string (Philip Harvey) + * If bucket or object ACL is empty string then don't add `X-Amz-Acl:` header (Nick Craig-Wood) + * Reduce memory consumption for s3 objects (Erik Agterdenbos) + * Fix listing loop when using v2 listing on v1 server (Nick Craig-Wood) + * Fix nil pointer exception when using Versions (Nick Craig-Wood) + * Fix excess memory usage when using versions (Nick Craig-Wood) + * Ignore versionIDs from uploads unless using `--s3-versions` or `--s3-versions-at` (Nick Craig-Wood) +* SFTP + * Add configuration options to set ssh Ciphers / MACs / KeyExchange (dgouju) + * Auto-detect shell type for fish (albertony) + * Fix NewObject with leading / (Nick Craig-Wood) +* Smb + * Fix issue where 
spurious dot directory is created (albertony) +* Storj + * Implement server side Copy (Kaloyan Raev) + +## v1.60.1 - 2022-11-17 + +[See commits](https://github.com/rclone/rclone/compare/v1.60.0...v1.60.1) + +* Bug Fixes + * lib/cache: Fix alias backend shutting down too soon (Nick Craig-Wood) + * wasm: Fix walltime link error by adding up-to-date wasm_exec.js (João Henrique Franco) + * docs + * Update faq.md with bisync (Samuel Johnson) + * Corrected download links in windows install docs (coultonluke) + * Add direct download link for windows arm64 (albertony) + * Remove link to rclone slack as it is no longer supported (Nick Craig-Wood) + * Faq: how to use a proxy server that requires a username and password (asdffdsazqqq) + * Oracle-object-storage: doc fix (Manoj Ghosh) + * Fix typo `remove` in rclone_serve_restic command (Joda Stößer) + * Fix character that was incorrectly interpreted as markdown (Clément Notin) +* VFS + * Fix deadlock caused by cache cleaner and upload finishing (Nick Craig-Wood) +* Local + * Clean absolute paths (albertony) + * Fix -L/--copy-links with filters missing directories (Nick Craig-Wood) +* Mailru + * Note that an app password is now needed (Nick Craig-Wood) + * Allow timestamps to be before the epoch 1970-01-01 (Nick Craig-Wood) +* S3 + * Add provider quirk `--s3-might-gzip` to fix corrupted on transfer: sizes differ (Nick Craig-Wood) + * Allow Storj to server side copy since it seems to work now (Nick Craig-Wood) + * Fix for unchecked err value in s3 listv2 (Aaron Gokaslan) + * Add additional Wasabi locations (techknowlogick) +* Smb + * Fix `Failed to sync: context canceled` at the end of syncs (Nick Craig-Wood) +* WebDAV + * Fix Move/Copy/DirMove when using -server-side-across-configs (Nick Craig-Wood) + ## v1.60.0 - 2022-10-21 [See commits](https://github.com/rclone/rclone/compare/v1.59.0...v1.60.0) @@ -45237,9 +46296,8 @@ of metadata, which breaks the desired 1:1 mapping of files to objects. 
### Can rclone do bi-directional sync? ### -No, not at present. rclone only does uni-directional sync from A -> -B. It may do in the future though since it has all the primitives - it -just requires writing the algorithm to do it. +Yes, since rclone v1.58.0, [bidirectional cloud sync](https://rclone.org/bisync/) is +available. ### Can I use rclone with an HTTP proxy? ### @@ -45264,6 +46322,14 @@ possibilities. So, on Linux, you may end up with code similar to export HTTP_PROXY=$http_proxy export HTTPS_PROXY=$http_proxy + +Note: If the proxy server requires a username and password, then use + + export http_proxy=http://username:password@proxyserver:12345 + export https_proxy=$http_proxy + export HTTP_PROXY=$http_proxy + export HTTPS_PROXY=$http_proxy + The `NO_PROXY` allows you to disable the proxy for specific hosts. Hosts must be comma separated, and can contain domains or parts. For instance "foo.com" also matches "bar.foo.com". @@ -45638,6 +46704,7 @@ put them back in again.` >}} * Jay * andrea rota * nicolov + * Matt Joiner * Dario Guzik * qip * yair@unicorn @@ -46057,6 +47124,28 @@ put them back in again.` >}} * Manoj Ghosh * Tom Mombourquette * Robert Newson + * Samuel Johnson + * coultonluke + * Anthony Pessy + * Philip Harvey + * dgouju + * Clément Notin + * x3-apptech <66947598+x3-apptech@users.noreply.github.com> + * Arnie97 + * Roel Arents <2691308+roelarents@users.noreply.github.com> + * Aaron Gokaslan + * techknowlogick + * rkettelerij + * Kamui + * asdffdsazqqq <90116442+asdffdsazqqq@users.noreply.github.com> + * Nathaniel Wesley Filardo + * ycdtosa + * Erik Agterdenbos + * Kevin Verstaen <48050031+kverstae@users.noreply.github.com> + * MohammadReza + * vanplus <60313789+vanplus@users.noreply.github.com> + * Jack <16779171+jkpe@users.noreply.github.com> + * Abdullah Saglam # Contact the rclone project # diff --git a/MANUAL.txt b/MANUAL.txt index 6cdd6ee0a..81320892b 100644 --- a/MANUAL.txt +++ b/MANUAL.txt @@ -1,6 +1,6 @@ rclone(1) User Manual Nick 
Craig-Wood -Oct 21, 2022 +Dec 20, 2022 Rclone syncs your files to cloud storage @@ -121,6 +121,7 @@ S3, that work out of the box.) - IDrive e2 - IONOS Cloud - Koofr +- Liara Object Storage - Mail.ru Cloud - Memset Memstore - Mega @@ -2181,7 +2182,7 @@ This will look something like (some irrelevant detail removed): "State": "*oauth-islocal,teamdrive,,", "Option": { "Name": "config_is_local", - "Help": "Use auto config?\n * Say Y if not sure\n * Say N if you are working on a remote or headless machine\n", + "Help": "Use web browser to automatically authenticate rclone with remote?\n * Say Y if the machine running rclone has a web browser you can use\n * Say N if running rclone on a (remote) machine without web browser access\nIf not sure try Y. If Y failed, try N.\n", "Default": true, "Examples": [ { @@ -2518,7 +2519,7 @@ This will look something like (some irrelevant detail removed): "State": "*oauth-islocal,teamdrive,,", "Option": { "Name": "config_is_local", - "Help": "Use auto config?\n * Say Y if not sure\n * Say N if you are working on a remote or headless machine\n", + "Help": "Use web browser to automatically authenticate rclone with remote?\n * Say Y if the machine running rclone has a web browser you can use\n * Say N if running rclone on a (remote) machine without web browser access\nIf not sure try Y. 
If Y failed, try N.\n", "Default": true, "Examples": [ { @@ -3360,6 +3361,7 @@ Options --hash Include hashes in the output (may take longer) --hash-type stringArray Show only this hash type (may be repeated) -h, --help help for lsjson + -M, --metadata Add metadata to the listing --no-mimetype Don't read the mime type (can speed things up) --no-modtime Don't read the modification time (can speed things up) --original Show the ID of the underlying Object @@ -4086,14 +4088,14 @@ Options --allow-other Allow access to other users (not supported on Windows) --allow-root Allow access to root user (not supported on Windows) --async-read Use asynchronous reads (not supported on Windows) (default true) - --attr-timeout duration Time for which file/directory attributes are cached (default 1s) + --attr-timeout Duration Time for which file/directory attributes are cached (default 1s) --daemon Run mount in background and exit parent process (as background output is suppressed, use --log-file with --log-format=pid,... 
to monitor) (not supported on Windows) - --daemon-timeout duration Time limit for rclone to respond to kernel (not supported on Windows) - --daemon-wait duration Time to wait for ready mount from daemon (maximum time on Linux, constant sleep time on OSX/BSD) (not supported on Windows) (default 1m0s) + --daemon-timeout Duration Time limit for rclone to respond to kernel (not supported on Windows) (default 0s) + --daemon-wait Duration Time to wait for ready mount from daemon (maximum time on Linux, constant sleep time on OSX/BSD) (not supported on Windows) (default 1m0s) --debug-fuse Debug the FUSE internals - needs -v --default-permissions Makes kernel enforce access control based on the file mode (not supported on Windows) --devname string Set the device name - default is remote:path - --dir-cache-time duration Time to cache directory entries for (default 5m0s) + --dir-cache-time Duration Time to cache directory entries for (default 5m0s) --dir-perms FileMode Directory permissions (default 0777) --file-perms FileMode File permissions (default 0666) --fuse-flag stringArray Flags or arguments to be passed direct to libfuse/WinFsp (repeat if required) @@ -4107,24 +4109,24 @@ Options --noappledouble Ignore Apple Double (._) and .DS_Store files (supported on OSX only) (default true) --noapplexattr Ignore all "com.apple.*" extended attributes (supported on OSX only) -o, --option stringArray Option for libfuse/WinFsp (repeat if required) - --poll-interval duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) + --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --read-only Only allow read-only access --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) --umask int Override the permission bits set by the filesystem 
(not supported on Windows) (default 2) - --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s) + --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off) - --vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s) + --vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s) --vfs-case-insensitive If a file name not found, find a case insensitive match --vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off) --vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) - --vfs-read-wait duration Time to wait for in-sequence read before seeking (default 20ms) + --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-used-is-size rclone size Use the rclone size algorithm for Used size - --vfs-write-back duration Time to writeback files after last use when using cache (default 5s) - --vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s) + --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s) + --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) --volname string Set the volume name (supported on Windows and OSX only) --write-back-cache Makes kernel buffer writes before sending them to rclone (without this, writethrough caching is used) 
(not supported on Windows) @@ -4204,11 +4206,12 @@ toggle the help on and off. The supported keys are: ↑,↓ or k,j to Move →,l to enter ←,h to return - c toggle counts g toggle graph + c toggle counts a toggle average size in directory + m toggle modified time u toggle human-readable format - n,s,C,A sort by name,size,count,average size + n,s,C,A,M sort by name,size,count,asize,mtime d delete file/directory v select file/directory V enter visual select mode @@ -4435,6 +4438,128 @@ browser when rclone is run. See the rc documentation for more info on the rc flags. +Server options + +Use --addr to specify which IP address and port the server should listen +on, eg --addr 1.2.3.4:8000 or --addr :8080 to listen to all IPs. By +default it only listens on localhost. You can use port :0 to let the OS +choose an available port. + +If you set --addr to listen on a public or LAN accessible IP address +then using Authentication is advised - see the next section for info. + +You can use a unix socket by setting the url to unix:///path/to/socket +or just by using an absolute path name. Note that unix sockets bypass +the authentication - this is expected to be done with file system +permissions. + +--addr may be repeated to listen on multiple IPs/ports/sockets. + +--server-read-timeout and --server-write-timeout can be used to control +the timeouts on the server. Note that this is the total time for a +transfer. + +--max-header-bytes controls the maximum number of bytes the server will +accept in the HTTP header. + +--baseurl controls the URL prefix that rclone serves from. By default +rclone will serve from the root. If you used --baseurl "/rclone" then +rclone would serve from a URL starting with "/rclone/". This is useful +if you wish to proxy rclone serve. Rclone automatically inserts leading +and trailing "/" on --baseurl, so --baseurl "rclone", +--baseurl "/rclone" and --baseurl "/rclone/" are all treated +identically. + +TLS (SSL) + +By default this will serve over http. 
If you want you can serve over +https. You will need to supply the --cert and --key flags. If you wish +to do client side certificate validation then you will need to supply +--client-ca also. + +--cert should be either a PEM encoded certificate or a concatenation +of that with the CA certificate. --key should be the PEM encoded private +key and --client-ca should be the PEM encoded client certificate +authority certificate. + +--min-tls-version is minimum TLS version that is acceptable. Valid +values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default "tls1.0"). + +Template + +--template allows a user to specify a custom markup template for HTTP +and WebDAV serve functions. The server exports the following markup to +be used within the template to serve pages: + + ----------------------------------------------------------------------- + Parameter Description + ----------------------------------- ----------------------------------- + .Name The full path of a file/directory. + + .Title Directory listing of .Name + + .Sort The current sort used. This is + changeable via ?sort= parameter + + Sort Options: + namedirfirst,name,size,time + (default namedirfirst) + + .Order The current ordering used. This is + changeable via ?order= parameter + + Order Options: asc,desc (default + asc) + + .Query Currently unused. + + .Breadcrumb Allows for creating a relative + navigation + + -- .Link The relative to the root link of + the Text. + + -- .Text The Name of the directory. + + .Entries Information about a specific + file/directory. + + -- .URL The 'url' of an entry. + + -- .Leaf Currently same as 'URL' but + intended to be 'just' the name. + + -- .IsDir Boolean for if an entry is a + directory or not. + + -- .Size Size in Bytes of the entry. + + -- .ModTime The UTC timestamp of an entry. + ----------------------------------------------------------------------- + +Authentication + +By default this will serve files without needing a login. 
+ +You can either use an htpasswd file which can take lots of users, or set +a single username and password with the --user and --pass flags. + +Use --htpasswd /path/to/htpasswd to provide an htpasswd file. This is in +standard apache format and supports MD5, SHA1 and BCrypt for basic +authentication. Bcrypt is recommended. + +To create an htpasswd file: + + touch htpasswd + htpasswd -B htpasswd user + htpasswd -B htpasswd anotherUser + +The password file can be updated while rclone is running. + +Use --realm to set the authentication realm. + +Use --salt to change the password hashing salt from the default. + rclone rcd * [flags] Options @@ -4937,8 +5062,8 @@ only with caching. Options --addr string The ip:port or :port to bind the DLNA http server to (default ":7879") - --announce-interval duration The interval between SSDP announcements (default 12m0s) - --dir-cache-time duration Time to cache directory entries for (default 5m0s) + --announce-interval Duration The interval between SSDP announcements (default 12m0s) + --dir-cache-time Duration Time to cache directory entries for (default 5m0s) --dir-perms FileMode Directory permissions (default 0777) --file-perms FileMode File permissions (default 0666) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) @@ -4949,24 +5074,24 @@ Options --no-checksum Don't compare checksums on up/download --no-modtime Don't read/write the modification time (can speed things up) --no-seek Don't allow seeking in files - --poll-interval duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) + --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --read-only Only allow read-only access --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 
1000) --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) - --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s) + --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off) - --vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s) + --vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s) --vfs-case-insensitive If a file name not found, find a case insensitive match --vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off) --vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) - --vfs-read-wait duration Time to wait for in-sequence read before seeking (default 20ms) + --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-used-is-size rclone size Use the rclone size algorithm for Used size - --vfs-write-back duration Time to writeback files after last use when using cache (default 5s) - --vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s) + --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s) + --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) See the global flags page for global options not listed here. 
@@ -5345,15 +5470,15 @@ Options --allow-other Allow access to other users (not supported on Windows) --allow-root Allow access to root user (not supported on Windows) --async-read Use asynchronous reads (not supported on Windows) (default true) - --attr-timeout duration Time for which file/directory attributes are cached (default 1s) + --attr-timeout Duration Time for which file/directory attributes are cached (default 1s) --base-dir string Base directory for volumes (default "/var/lib/docker-volumes/rclone") --daemon Run mount in background and exit parent process (as background output is suppressed, use --log-file with --log-format=pid,... to monitor) (not supported on Windows) - --daemon-timeout duration Time limit for rclone to respond to kernel (not supported on Windows) - --daemon-wait duration Time to wait for ready mount from daemon (maximum time on Linux, constant sleep time on OSX/BSD) (not supported on Windows) (default 1m0s) + --daemon-timeout Duration Time limit for rclone to respond to kernel (not supported on Windows) (default 0s) + --daemon-wait Duration Time to wait for ready mount from daemon (maximum time on Linux, constant sleep time on OSX/BSD) (not supported on Windows) (default 1m0s) --debug-fuse Debug the FUSE internals - needs -v --default-permissions Makes kernel enforce access control based on the file mode (not supported on Windows) --devname string Set the device name - default is remote:path - --dir-cache-time duration Time to cache directory entries for (default 5m0s) + --dir-cache-time Duration Time to cache directory entries for (default 5m0s) --dir-perms FileMode Directory permissions (default 0777) --file-perms FileMode File permissions (default 0666) --forget-state Skip restoring previous state @@ -5369,26 +5494,26 @@ Options --noappledouble Ignore Apple Double (._) and .DS_Store files (supported on OSX only) (default true) --noapplexattr Ignore all "com.apple.*" extended attributes (supported on OSX only) -o, --option 
stringArray Option for libfuse/WinFsp (repeat if required) - --poll-interval duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) + --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --read-only Only allow read-only access --socket-addr string Address or absolute path (default: /run/docker/plugins/rclone.sock) --socket-gid int GID for unix socket (default: current process GID) (default 1000) --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) - --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s) + --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off) - --vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s) + --vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s) --vfs-case-insensitive If a file name not found, find a case insensitive match --vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off) --vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) - --vfs-read-wait duration Time to wait for in-sequence read before 
seeking (default 20ms) + --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-used-is-size rclone size Use the rclone size algorithm for Used size - --vfs-write-back duration Time to writeback files after last use when using cache (default 5s) - --vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s) + --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s) + --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) --volname string Set the volume name (supported on Windows and OSX only) --write-back-cache Makes kernel buffer writes before sending them to rclone (without this, writethrough caching is used) (not supported on Windows) @@ -5822,7 +5947,7 @@ Options --addr string IPaddress:Port or :Port to bind server to (default "localhost:2121") --auth-proxy string A program to use to create the backend from the auth --cert string TLS PEM key (concatenation of certificate and CA certificate) - --dir-cache-time duration Time to cache directory entries for (default 5m0s) + --dir-cache-time Duration Time to cache directory entries for (default 5m0s) --dir-perms FileMode Directory permissions (default 0777) --file-perms FileMode File permissions (default 0666) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) @@ -5833,26 +5958,26 @@ Options --no-seek Don't allow seeking in files --pass string Password for authentication (empty value allow every password) --passive-port string Passive port range to use (default "30000-32000") - --poll-interval duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) + --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --public-ip 
string Public IP address to advertise for passive connections --read-only Only allow read-only access --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) --user string User name for authentication (default "anonymous") - --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s) + --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off) - --vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s) + --vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s) --vfs-case-insensitive If a file name not found, find a case insensitive match --vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off) --vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) - --vfs-read-wait duration Time to wait for in-sequence read before seeking (default 20ms) + --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-used-is-size rclone size Use the rclone size algorithm for Used size - --vfs-write-back duration Time to writeback files after last use when using cache (default 5s) - --vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s) + --vfs-write-back Duration Time to writeback files after 
last use when using cache (default 5s) + --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) See the global flags page for global options not listed here. @@ -5887,6 +6012,13 @@ choose an available port. If you set --addr to listen on a public or LAN accessible IP address then using Authentication is advised - see the next section for info. +You can use a unix socket by setting the url to unix:///path/to/socket +or just by using an absolute path name. Note that unix sockets bypass +the authentication - this is expected to be done with file system +permissions. + +--addr may be repeated to listen on multiple IPs/ports/sockets. + --server-read-timeout and --server-write-timeout can be used to control the timeouts on the server. Note that this is the total time for a transfer. @@ -5902,7 +6034,7 @@ and trailing "/" on --baseurl, so --baseurl "rclone", --baseurl "/rclone" and --baseurl "/rclone/" are all treated identically. -SSL/TLS +TLS (SSL) By default this will serve over http. If you want you can serve over https. You will need to supply the --cert and --key flags. If you wish @@ -6315,47 +6447,47 @@ only with caching. 
Options - --addr string IPaddress:Port or :Port to bind server to (default "127.0.0.1:8080") + --addr stringArray IPaddress:Port or :Port to bind server to (default [127.0.0.1:8080]) --baseurl string Prefix for URLs - leave blank for root - --cert string SSL PEM key (concatenation of certificate and CA certificate) + --cert string TLS PEM key (concatenation of certificate and CA certificate) --client-ca string Client certificate authority to verify clients with - --dir-cache-time duration Time to cache directory entries for (default 5m0s) + --dir-cache-time Duration Time to cache directory entries for (default 5m0s) --dir-perms FileMode Directory permissions (default 0777) --file-perms FileMode File permissions (default 0666) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) -h, --help help for http --htpasswd string A htpasswd file - if not provided no authentication is done - --key string SSL PEM Private key + --key string TLS PEM Private key --max-header-bytes int Maximum size of request header (default 4096) --min-tls-version string Minimum TLS version that is acceptable (default "tls1.0") --no-checksum Don't compare checksums on up/download --no-modtime Don't read/write the modification time (can speed things up) --no-seek Don't allow seeking in files --pass string Password for authentication - --poll-interval duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) + --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --read-only Only allow read-only access --realm string Realm for authentication --salt string Password hashing salt (default "dlPL2MqE") - --server-read-timeout duration Timeout for server reading data (default 1h0m0s) - --server-write-timeout duration Timeout for server writing data 
(default 1h0m0s) + --server-read-timeout Duration Timeout for server reading data (default 1h0m0s) + --server-write-timeout Duration Timeout for server writing data (default 1h0m0s) --template string User-specified template --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) --user string User name for authentication - --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s) + --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off) - --vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s) + --vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s) --vfs-case-insensitive If a file name not found, find a case insensitive match --vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off) --vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) - --vfs-read-wait duration Time to wait for in-sequence read before seeking (default 20ms) + --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-used-is-size rclone size Use the rclone size algorithm for Used size - --vfs-write-back duration Time to writeback files after last use when using cache (default 5s) - --vfs-write-wait duration Time to wait for in-sequence 
write before giving error (default 1s) + --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s) + --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) See the global flags page for global options not listed here. @@ -6369,7 +6501,7 @@ Serve the remote for restic's REST API. Synopsis -Run a basic web server to serve a remove over restic's REST backend API +Run a basic web server to serve a remote over restic's REST backend API over HTTP. This allows restic to use rclone as a data storage mechanism for cloud providers that restic does not support directly. @@ -6451,13 +6583,20 @@ starting with a path of //. Server options Use --addr to specify which IP address and port the server should listen -on, e.g. --addr 1.2.3.4:8000 or --addr :8080 to listen to all IPs. By +on, eg --addr 1.2.3.4:8000 or --addr :8080 to listen to all IPs. By default it only listens on localhost. You can use port :0 to let the OS choose an available port. If you set --addr to listen on a public or LAN accessible IP address then using Authentication is advised - see the next section for info. +You can use a unix socket by setting the url to unix:///path/to/socket +or just by using an absolute path name. Note that unix sockets bypass +the authentication - this is expected to be done with file system +permissions. + +--addr may be repeated to listen on multiple IPs/ports/sockets. + --server-read-timeout and --server-write-timeout can be used to control the timeouts on the server. Note that this is the total time for a transfer. @@ -6473,55 +6612,20 @@ and trailing "/" on --baseurl, so --baseurl "rclone", --baseurl "/rclone" and --baseurl "/rclone/" are all treated identically. ---template allows a user to specify a custom markup template for HTTP -and WebDAV serve functions. 
The server exports the following markup to -be used within the template to server pages: +TLS (SSL) - ----------------------------------------------------------------------- - Parameter Description - ----------------------------------- ----------------------------------- - .Name The full path of a file/directory. +By default this will serve over http. If you want you can serve over +https. You will need to supply the --cert and --key flags. If you wish +to do client side certificate validation then you will need to supply +--client-ca also. - .Title Directory listing of .Name +--cert should be a either a PEM encoded certificate or a concatenation +of that with the CA certificate. --key should be the PEM encoded private +key and --client-ca should be the PEM encoded client certificate +authority certificate. - .Sort The current sort used. This is - changeable via ?sort= parameter - - Sort Options: - namedirfirst,name,size,time - (default namedirfirst) - - .Order The current ordering used. This is - changeable via ?order= parameter - - Order Options: asc,desc (default - asc) - - .Query Currently unused. - - .Breadcrumb Allows for creating a relative - navigation - - -- .Link The relative to the root link of - the Text. - - -- .Text The Name of the directory. - - .Entries Information about a specific - file/directory. - - -- .URL The 'url' of an entry. - - -- .Leaf Currently same as 'URL' but - intended to be 'just' the name. - - -- .IsDir Boolean for if an entry is a - directory or not. - - -- .Size Size in Bytes of the entry. - - -- .ModTime The UTC timestamp of an entry. - ----------------------------------------------------------------------- +--min-tls-version is minimum TLS version that is acceptable. Valid +values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default "tls1.0"). Authentication @@ -6544,43 +6648,30 @@ The password file can be updated while rclone is running. Use --realm to set the authentication realm. 
-SSL/TLS - -By default this will serve over HTTP. If you want you can serve over -HTTPS. You will need to supply the --cert and --key flags. If you wish -to do client side certificate validation then you will need to supply ---client-ca also. - ---cert should be either a PEM encoded certificate or a concatenation of -that with the CA certificate. --key should be the PEM encoded private -key and --client-ca should be the PEM encoded client certificate -authority certificate. - ---min-tls-version is minimum TLS version that is acceptable. Valid -values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default "tls1.0"). +Use --salt to change the password hashing salt from the default. rclone serve restic remote:path [flags] Options - --addr string IPaddress:Port or :Port to bind server to (default "localhost:8080") + --addr stringArray IPaddress:Port or :Port to bind server to (default [127.0.0.1:8080]) --append-only Disallow deletion of repository data --baseurl string Prefix for URLs - leave blank for root --cache-objects Cache listed objects (default true) - --cert string SSL PEM key (concatenation of certificate and CA certificate) + --cert string TLS PEM key (concatenation of certificate and CA certificate) --client-ca string Client certificate authority to verify clients with -h, --help help for restic - --htpasswd string htpasswd file - if not provided no authentication is done - --key string SSL PEM Private key + --htpasswd string A htpasswd file - if not provided no authentication is done + --key string TLS PEM Private key --max-header-bytes int Maximum size of request header (default 4096) --min-tls-version string Minimum TLS version that is acceptable (default "tls1.0") --pass string Password for authentication --private-repos Users can only access their private repo - --realm string Realm for authentication (default "rclone") - --server-read-timeout duration Timeout for server reading data (default 1h0m0s) - --server-write-timeout duration Timeout for server 
writing data (default 1h0m0s) + --realm string Realm for authentication + --salt string Password hashing salt (default "dlPL2MqE") + --server-read-timeout Duration Timeout for server reading data (default 1h0m0s) + --server-write-timeout Duration Timeout for server writing data (default 1h0m0s) --stdio Run an HTTP2 server on stdin/stdout - --template string User-specified template --user string User name for authentication See the global flags page for global options not listed here. @@ -7044,7 +7135,7 @@ Options --addr string IPaddress:Port or :Port to bind server to (default "localhost:2022") --auth-proxy string A program to use to create the backend from the auth --authorized-keys string Authorized keys file (default "~/.ssh/authorized_keys") - --dir-cache-time duration Time to cache directory entries for (default 5m0s) + --dir-cache-time Duration Time to cache directory entries for (default 5m0s) --dir-perms FileMode Directory permissions (default 0777) --file-perms FileMode File permissions (default 0666) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) @@ -7055,26 +7146,26 @@ Options --no-modtime Don't read/write the modification time (can speed things up) --no-seek Don't allow seeking in files --pass string Password for authentication - --poll-interval duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) + --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --read-only Only allow read-only access --stdio Run an sftp server on stdin/stdout --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) --user string User name for authentication - 
--vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s) + --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off) - --vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s) + --vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s) --vfs-case-insensitive If a file name not found, find a case insensitive match --vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off) --vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) - --vfs-read-wait duration Time to wait for in-sequence read before seeking (default 20ms) + --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-used-is-size rclone size Use the rclone size algorithm for Used size - --vfs-write-back duration Time to writeback files after last use when using cache (default 5s) - --vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s) + --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s) + --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) See the global flags page for global options not listed here. @@ -7106,13 +7197,20 @@ or "SHA-1". Use the hashsum command to see the full list. 
Server options Use --addr to specify which IP address and port the server should listen -on, e.g. --addr 1.2.3.4:8000 or --addr :8080 to listen to all IPs. By +on, eg --addr 1.2.3.4:8000 or --addr :8080 to listen to all IPs. By default it only listens on localhost. You can use port :0 to let the OS choose an available port. If you set --addr to listen on a public or LAN accessible IP address then using Authentication is advised - see the next section for info. +You can use a unix socket by setting the url to unix:///path/to/socket +or just by using an absolute path name. Note that unix sockets bypass +the authentication - this is expected to be done with file system +permissions. + +--addr may be repeated to listen on multiple IPs/ports/sockets. + --server-read-timeout and --server-write-timeout can be used to control the timeouts on the server. Note that this is the total time for a transfer. @@ -7128,6 +7226,23 @@ and trailing "/" on --baseurl, so --baseurl "rclone", --baseurl "/rclone" and --baseurl "/rclone/" are all treated identically. +TLS (SSL) + +By default this will serve over http. If you want you can serve over +https. You will need to supply the --cert and --key flags. If you wish +to do client side certificate validation then you will need to supply +--client-ca also. + +--cert should be a either a PEM encoded certificate or a concatenation +of that with the CA certificate. --key should be the PEM encoded private +key and --client-ca should be the PEM encoded client certificate +authority certificate. + +--min-tls-version is minimum TLS version that is acceptable. Valid +values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default "tls1.0"). + +Template + --template allows a user to specify a custom markup template for HTTP and WebDAV serve functions. The server exports the following markup to be used within the template to server pages: @@ -7199,20 +7314,7 @@ The password file can be updated while rclone is running. 
Use --realm to set the authentication realm. -SSL/TLS - -By default this will serve over HTTP. If you want you can serve over -HTTPS. You will need to supply the --cert and --key flags. If you wish -to do client side certificate validation then you will need to supply ---client-ca also. - ---cert should be either a PEM encoded certificate or a concatenation of -that with the CA certificate. --key should be the PEM encoded private -key and --client-ca should be the PEM encoded client certificate -authority certificate. - ---min-tls-version is minimum TLS version that is acceptable. Valid -values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default "tls1.0"). +Use --salt to change the password hashing salt from the default. VFS - Virtual File System @@ -7608,49 +7710,50 @@ that rclone supports. Options - --addr string IPaddress:Port or :Port to bind server to (default "localhost:8080") + --addr stringArray IPaddress:Port or :Port to bind server to (default [127.0.0.1:8080]) --auth-proxy string A program to use to create the backend from the auth --baseurl string Prefix for URLs - leave blank for root - --cert string SSL PEM key (concatenation of certificate and CA certificate) + --cert string TLS PEM key (concatenation of certificate and CA certificate) --client-ca string Client certificate authority to verify clients with - --dir-cache-time duration Time to cache directory entries for (default 5m0s) + --dir-cache-time Duration Time to cache directory entries for (default 5m0s) --dir-perms FileMode Directory permissions (default 0777) --disable-dir-list Disable HTML directory list on GET request for a directory --etag-hash string Which hash to use for the ETag, or auto or blank for off --file-perms FileMode File permissions (default 0666) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) -h, --help help for webdav - --htpasswd string htpasswd file - if not provided no authentication is done - --key string SSL PEM 
Private key + --htpasswd string A htpasswd file - if not provided no authentication is done + --key string TLS PEM Private key --max-header-bytes int Maximum size of request header (default 4096) --min-tls-version string Minimum TLS version that is acceptable (default "tls1.0") --no-checksum Don't compare checksums on up/download --no-modtime Don't read/write the modification time (can speed things up) --no-seek Don't allow seeking in files --pass string Password for authentication - --poll-interval duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) + --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --read-only Only allow read-only access - --realm string Realm for authentication (default "rclone") - --server-read-timeout duration Timeout for server reading data (default 1h0m0s) - --server-write-timeout duration Timeout for server writing data (default 1h0m0s) + --realm string Realm for authentication + --salt string Password hashing salt (default "dlPL2MqE") + --server-read-timeout Duration Timeout for server reading data (default 1h0m0s) + --server-write-timeout Duration Timeout for server writing data (default 1h0m0s) --template string User-specified template --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) --user string User name for authentication - --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s) + --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off) - --vfs-cache-poll-interval 
duration Interval to poll the cache for stale objects (default 1m0s) + --vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s) --vfs-case-insensitive If a file name not found, find a case insensitive match --vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off) --vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) - --vfs-read-wait duration Time to wait for in-sequence read before seeking (default 20ms) + --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-used-is-size rclone size Use the rclone size algorithm for Used size - --vfs-write-back duration Time to writeback files after last use when using cache (default 5s) - --vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s) + --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s) + --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) See the global flags page for global options not listed here. @@ -7745,7 +7848,7 @@ Log any change notify requests for the remote passed in. Options -h, --help help for changenotify - --poll-interval duration Time to wait between polling for changes (default 10s) + --poll-interval Duration Time to wait between polling for changes (default 10s) See the global flags page for global options not listed here. 
@@ -7800,7 +7903,7 @@ Options --check-normalization Check UTF-8 Normalization --check-streaming Check uploads with indeterminate file size -h, --help help for info - --upload-wait duration Wait after writing a file + --upload-wait Duration Wait after writing a file (default 0s) --write-json string Write results to file See the global flags page for global options not listed here. @@ -7844,6 +7947,7 @@ Options --files int Number of files to create (default 1000) --files-per-directory int Average number of files per directory (default 10) -h, --help help for makefiles + --max-depth int Maximum depth of directory hierarchy (default 10) --max-file-size SizeSuffix Maximum size of files to create (default 100) --max-name-length int Maximum size of file names (default 12) --min-file-size SizeSuffix Minimum size of file to create @@ -7954,7 +8058,6 @@ For a more interactive navigation of the remote see the ncdu command. Options -a, --all All files are listed (list . files too) - -C, --color Turn colorization on always -d, --dirs-only List directories only --dirsfirst List directories before files (-U disables) --full-path Print the full path prefix for each file @@ -8174,8 +8277,17 @@ Will get their own names Valid remote names Remote names are case sensitive, and must adhere to the following rules: -- May only contain 0-9, A-Z, a-z, _, -, . and space. - May not start -with - or space. +- May contain number, letter, _, -, . and space. - May not start with - +or space. - May not end with space. + +Starting with rclone version 1.61, any Unicode numbers and letters are +allowed, while in older versions it was limited to plain ASCII (0-9, +A-Z, a-z). If you use the same rclone configuration from different +shells, which may be configured with different character encoding, you +must be cautious to use characters that are possible to write in all of +them. 
This is mostly a problem on Windows, where the console +traditionally uses a non-Unicode character set - defined by the +so-called "code page". Quoting and the shell @@ -8672,6 +8784,18 @@ quicker than without the --checksum flag. When using this flag, rclone won't update mtimes of remote files if they are incorrect as it would normally. +--color WHEN + +Specify when colors (and other ANSI codes) should be added to the +output. + +AUTO (default) only allows ANSI codes when the output is a terminal + +NEVER never allow ANSI codes + +ALWAYS always add ANSI codes, regardless of the output format (terminal +or file) + --compare-dest=DIR When using sync, copy or move DIR is checked in addition to the @@ -10157,6 +10281,12 @@ For the filtering options - --min-age - --max-age - --dump filters +- --metadata-include +- --metadata-include-from +- --metadata-exclude +- --metadata-exclude-from +- --metadata-filter +- --metadata-filter-from See the filtering section. @@ -10369,13 +10499,14 @@ two ways of doing it, described below. Configuring using rclone authorize On the headless box run rclone config but answer N to the -Use auto config? question. +Use web browser to automatically authenticate? question. ... Remote config - Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine + Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access + If not sure try Y. If Y failed, try N. y) Yes (default) n) No y/n> n @@ -10446,13 +10577,14 @@ box port 53682 to local machine by using the following command: ssh -L localhost:53682:localhost:53682 username@remote_server Then on the headless box run rclone config and answer Y to the -Use auto config? question. +Use web browser to automatically authenticate? question. ... Remote config - Use auto config? 
- * Say Y if not sure - * Say N if you are working on a remote or headless machine + Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access + If not sure try Y. If Y failed, try N. y) Yes (default) n) No y/n> y @@ -11207,6 +11339,44 @@ E.g. for the following directory structure: The command rclone ls --exclude-if-present .ignore dir1 does not list dir3, file3 or .ignore. +Metadata filters + +The metadata filters work in a very similar way to the normal file name +filters, except they match metadata on the object. + +The metadata should be specified as key=value patterns. This may be +wildcarded using the normal filter patterns or regular expressions. + +For example if you wished to list only local files with a mode of 100664 +you could do that with: + + rclone lsf -M --files-only --metadata-include "mode=100664" . + +Or if you wished to show files with an atime, mtime or btime at a given +date: + + rclone lsf -M --files-only --metadata-include "[abm]time=2022-12-16*" . + +Like file filtering, metadata filtering only applies to files not to +directories. + +The filters can be applied using these flags. + +- --metadata-include - Include metadatas matching pattern +- --metadata-include-from - Read metadata include patterns from file + (use - to read from stdin) +- --metadata-exclude - Exclude metadatas matching pattern +- --metadata-exclude-from - Read metadata exclude patterns from file + (use - to read from stdin) +- --metadata-filter - Add a metadata filtering rule +- --metadata-filter-from - Read metadata filtering patterns from a + file (use - to read from stdin) + +Each flag can be repeated. See the section on how filter rules are +applied for more details - these flags work in an identical way to the +file name filtering flags, but instead of file name patterns have +metadata patterns. 
+ Common pitfalls The most frequent filter support issues on the rclone forum are: @@ -11910,6 +12080,14 @@ See the config providers command for more information on the above. Authentication is required for this call. +config/setpath: Set the path of the config file + +Parameters: + +- path - path to the config file to use + +Authentication is required for this call. + config/update: update the config for a remote. This takes the following parameters: @@ -12010,7 +12188,7 @@ Returns: "result": "" } - OR + OR { "error": true, "result": "" @@ -12201,6 +12379,25 @@ Parameters: - rate - int +debug/set-gc-percent: Call runtime/debug.SetGCPercent for setting the garbage collection target percentage. + +SetGCPercent sets the garbage collection target percentage: a collection +is triggered when the ratio of freshly allocated data to live data +remaining after the previous collection reaches this percentage. +SetGCPercent returns the previous setting. The initial setting is the +value of the GOGC environment variable at startup, or 100 if the +variable is not set. + +This setting may be effectively reduced in order to maintain a memory +limit. A negative percentage effectively disables garbage collection, +unless the memory limit is reached. + +See https://pkg.go.dev/runtime/debug#SetMemoryLimit for more details. + +Parameters: + +- gc-percent - int + debug/set-mutex-profile-fraction: Set runtime.SetMutexProfileFraction for mutex profiling. SetMutexProfileFraction controls the fraction of mutex contention events @@ -12222,6 +12419,47 @@ Results: - previousRate - int +debug/set-soft-memory-limit: Call runtime/debug.SetMemoryLimit for setting a soft memory limit for the runtime. + +SetMemoryLimit provides the runtime with a soft memory limit. + +The runtime undertakes several processes to try to respect this memory +limit, including adjustments to the frequency of garbage collections and +returning memory to the underlying system more aggressively. 
This limit +will be respected even if GOGC=off (or, if SetGCPercent(-1) is +executed). + +The input limit is provided as bytes, and includes all memory mapped, +managed, and not released by the Go runtime. Notably, it does not +account for space used by the Go binary and memory external to Go, such +as memory managed by the underlying system on behalf of the process, or +memory managed by non-Go code inside the same process. Examples of +excluded memory sources include: OS kernel memory held on behalf of the +process, memory allocated by C code, and memory mapped by syscall.Mmap +(because it is not managed by the Go runtime). + +A zero limit or a limit that's lower than the amount of memory used by +the Go runtime may cause the garbage collector to run nearly +continuously. However, the application may still make progress. + +The memory limit is always respected by the Go runtime, so to +effectively disable this behavior, set the limit very high. +math.MaxInt64 is the canonical value for disabling the limit, but values +much greater than the available memory on the underlying system work +just as well. + +See https://go.dev/doc/gc-guide for a detailed guide explaining the soft +memory limit in more detail, as well as a variety of common use-cases +and scenarios. + +SetMemoryLimit returns the previously set memory limit. A negative input +does not adjust the limit, and allows for retrieval of the currently set +memory limit. + +Parameters: + +- mem-limit - int + fscache/clear: Clear the Fs cache. This clears the fs cache. This is where remotes created from backends @@ -13781,7 +14019,7 @@ upon backend-specific capabilities. 
Microsoft OneDrive Yes Yes Yes Yes Yes No No Yes Yes Yes OpenDrive Yes Yes Yes Yes No No No No No Yes OpenStack Swift Yes † Yes No No No Yes Yes No Yes No - Oracle Object Storage Yes Yes No No Yes Yes No No No No + Oracle Object Storage No Yes No No Yes Yes Yes No No No pCloud Yes Yes Yes Yes Yes No No Yes Yes Yes premiumize.me Yes No Yes Yes No No No Yes Yes Yes put.io Yes No Yes Yes Yes No Yes No Yes Yes @@ -13791,7 +14029,7 @@ upon backend-specific capabilities. Sia No No No No No No Yes No No Yes SMB No No Yes Yes No No Yes No No Yes SugarSync Yes Yes Yes Yes No No Yes Yes No Yes - Storj Yes † No Yes No No Yes Yes No No No + Storj Yes † Yes Yes No No Yes Yes No No No Uptobox No Yes Yes Yes No No No No No No WebDAV Yes Yes Yes Yes No No Yes ‡ No Yes Yes Yandex Disk Yes Yes Yes Yes Yes No Yes Yes Yes Yes @@ -13903,9 +14141,10 @@ These flags are available for every command. -c, --checksum Skip based on checksum (if available) & size, not mod-time & size --client-cert string Client SSL certificate (PEM) for mutual TLS auth --client-key string Client SSL private key (PEM) for mutual TLS auth + --color string When to show colors (and other ANSI codes) AUTO|NEVER|ALWAYS (default "AUTO") --compare-dest stringArray Include additional comma separated server-side paths during comparison --config string Config file (default "$HOME/.config/rclone/rclone.conf") - --contimeout duration Connect timeout (default 1m0s) + --contimeout Duration Connect timeout (default 1m0s) --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination --cpuprofile string Write cpu profile to file --cutoff-mode string Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default "HARD") @@ -13923,16 +14162,16 @@ These flags are available for every command. 
--dump-headers Dump HTTP headers - may contain sensitive info --error-on-no-transfer Sets exit code 9 if no files are transferred, useful in scripts --exclude stringArray Exclude files matching pattern - --exclude-from stringArray Read exclude patterns from file (use - to read from stdin) + --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin) --exclude-if-present stringArray Exclude directories if filename is present - --expect-continue-timeout duration Timeout when using expect / 100-continue in HTTP (default 1s) + --expect-continue-timeout Duration Timeout when using expect / 100-continue in HTTP (default 1s) --fast-list Use recursive list if available; uses more memory but fewer transactions --files-from stringArray Read list of source-file names from file (use - to read from stdin) --files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin) - -f, --filter stringArray Add a file-filtering rule - --filter-from stringArray Read filtering patterns from a file (use - to read from stdin) - --fs-cache-expire-duration duration Cache remotes for this long (0 to disable caching) (default 5m0s) - --fs-cache-expire-interval duration Interval to check for expired remotes (default 1m0s) + -f, --filter stringArray Add a file filtering rule + --filter-from stringArray Read file filtering patterns from a file (use - to read from stdin) + --fs-cache-expire-duration Duration Cache remotes for this long (0 to disable caching) (default 5m0s) + --fs-cache-expire-interval Duration Interval to check for expired remotes (default 1m0s) --header stringArray Set HTTP header for all transactions --header-download stringArray Set HTTP header for download transactions --header-upload stringArray Set HTTP header for upload transactions @@ -13946,9 +14185,9 @@ These flags are available for every command. 
-I, --ignore-times Don't skip files that match size and time - transfer all files --immutable Do not modify files, fail if existing files have been modified --include stringArray Include files matching pattern - --include-from stringArray Read include patterns from file (use - to read from stdin) + --include-from stringArray Read file include patterns from file (use - to read from stdin) -i, --interactive Enable interactive mode - --kv-lock-time duration Maximum time to keep key-value database locked by process (default 1s) + --kv-lock-time Duration Maximum time to keep key-value database locked by process (default 1s) --log-file string Log everything to this file --log-format string Comma separated list of log format options (default "date,time") --log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE") @@ -13958,16 +14197,22 @@ These flags are available for every command. --max-backlog int Maximum number of objects in sync or check backlog (default 10000) --max-delete int When synchronizing, limit the number of deletes (default -1) --max-depth int If set limits the recursion depth to this (default -1) - --max-duration duration Maximum duration rclone will transfer data for + --max-duration Duration Maximum duration rclone will transfer data for (default 0s) --max-size SizeSuffix Only transfer files smaller than this in KiB or suffix B|K|M|G|T|P (default off) --max-stats-groups int Maximum number of stats groups to keep in memory, on max oldest is discarded (default 1000) --max-transfer SizeSuffix Maximum size of data to transfer (default off) --memprofile string Write memory profile to file -M, --metadata If set, preserve metadata when copying objects + --metadata-exclude stringArray Exclude metadatas matching pattern + --metadata-exclude-from stringArray Read metadata exclude patterns from file (use - to read from stdin) + --metadata-filter stringArray Add a metadata filtering rule + --metadata-filter-from stringArray Read metadata filtering 
patterns from a file (use - to read from stdin) + --metadata-include stringArray Include metadatas matching pattern + --metadata-include-from stringArray Read metadata include patterns from file (use - to read from stdin) --metadata-set stringArray Add metadata key=value when uploading --min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off) --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) - --modify-window duration Max time diff to be considered the same (default 1ns) + --modify-window Duration Max time diff to be considered the same (default 1ns) --multi-thread-cutoff SizeSuffix Use multi-thread downloads for files above this size (default 250Mi) --multi-thread-streams int Max number of streams to use for multi-thread downloads (default 4) --no-check-certificate Do not verify the server SSL certificate (insecure) @@ -13983,25 +14228,26 @@ These flags are available for every command. --progress-terminal-title Show progress on the terminal title (requires -P/--progress) -q, --quiet Print as little stuff as possible --rc Enable the remote control server - --rc-addr string IPaddress:Port or :Port to bind server to (default "localhost:5572") + --rc-addr stringArray IPaddress:Port or :Port to bind server to (default [localhost:5572]) --rc-allow-origin string Set the allowed origin for CORS --rc-baseurl string Prefix for URLs - leave blank for root - --rc-cert string SSL PEM key (concatenation of certificate and CA certificate) + --rc-cert string TLS PEM key (concatenation of certificate and CA certificate) --rc-client-ca string Client certificate authority to verify clients with --rc-enable-metrics Enable prometheus metrics on /metrics --rc-files string Path to local files to serve on the HTTP server - --rc-htpasswd string htpasswd file - if not provided no authentication is done - --rc-job-expire-duration duration Expire finished async jobs older than this value (default 
1m0s) - --rc-job-expire-interval duration Interval to check for expired async jobs (default 10s) - --rc-key string SSL PEM Private key + --rc-htpasswd string A htpasswd file - if not provided no authentication is done + --rc-job-expire-duration Duration Expire finished async jobs older than this value (default 1m0s) + --rc-job-expire-interval Duration Interval to check for expired async jobs (default 10s) + --rc-key string TLS PEM Private key --rc-max-header-bytes int Maximum size of request header (default 4096) --rc-min-tls-version string Minimum TLS version that is acceptable (default "tls1.0") --rc-no-auth Don't require auth for certain methods --rc-pass string Password for authentication - --rc-realm string Realm for authentication (default "rclone") + --rc-realm string Realm for authentication + --rc-salt string Password hashing salt (default "dlPL2MqE") --rc-serve Enable the serving of remote objects - --rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s) - --rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s) + --rc-server-read-timeout Duration Timeout for server reading data (default 1h0m0s) + --rc-server-write-timeout Duration Timeout for server writing data (default 1h0m0s) --rc-template string User-specified template --rc-user string User name for authentication --rc-web-fetch-url string URL to fetch the releases for webgui (default "https://api.github.com/repos/rclone/rclone-webui-react/releases/latest") @@ -14011,10 +14257,10 @@ These flags are available for every command. --rc-web-gui-update Check and update to latest version of web gui --refresh-times Refresh the modtime of remote files --retries int Retry operations this many times if they fail (default 3) - --retries-sleep duration Interval between retrying operations if they fail, e.g. 500ms, 60s, 5m (0 to disable) + --retries-sleep Duration Interval between retrying operations if they fail, e.g. 
500ms, 60s, 5m (0 to disable) (default 0s) --server-side-across-configs Allow server-side operations (e.g. copy) to work across different configs --size-only Skip based on size only, not mod-time or checksum - --stats duration Interval between printing stats, e.g. 500ms, 60s, 5m (0 to disable) (default 1m0s) + --stats Duration Interval between printing stats, e.g. 500ms, 60s, 5m (0 to disable) (default 1m0s) --stats-file-name-length int Max file name length in stats (0 for no limit) (default 45) --stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO") --stats-one-line Make the stats fit on one line @@ -14027,7 +14273,7 @@ These flags are available for every command. --syslog Use Syslog for logging --syslog-facility string Facility for syslog, e.g. KERN,USER,... (default "DAEMON") --temp-dir string Directory rclone will use for temporary files (default "/tmp") - --timeout duration IO idle timeout (default 5m0s) + --timeout Duration IO idle timeout (default 5m0s) --tpslimit float Limit HTTP transactions per second to this --tpslimit-burst int Max burst of transactions for --tpslimit (default 1) --track-renames When synchronizing, track file renames and do a server-side move if possible @@ -14038,7 +14284,7 @@ These flags are available for every command. --use-json-log Use json log format --use-mmap Use mmap allocator (see docs) --use-server-modtime Use server modified time instead of object metadata - --user-agent string Set the user-agent to a specified string (default "rclone/v1.60.0") + --user-agent string Set the user-agent to a specified string (default "rclone/v1.61.0") -v, --verbose count Print lots more stuff (repeat for more) Backend Flags @@ -14046,529 +14292,543 @@ Backend Flags These flags are available for every command. They control the backends and may be set in the config file. 
- --acd-auth-url string Auth server URL - --acd-client-id string OAuth Client Id - --acd-client-secret string OAuth Client Secret - --acd-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot) - --acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink (default 9Gi) - --acd-token string OAuth Access Token as a JSON blob - --acd-token-url string Token server url - --acd-upload-wait-per-gb Duration Additional time per GiB to wait after a failed complete upload to see if it appears (default 3m0s) - --alias-remote string Remote or path to alias - --azureblob-access-tier string Access tier of blob: hot, cool or archive - --azureblob-account string Storage Account Name - --azureblob-archive-tier-delete Delete archive tier blobs before overwriting - --azureblob-chunk-size SizeSuffix Upload chunk size (default 4Mi) - --azureblob-disable-checksum Don't store MD5 checksum with object metadata - --azureblob-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8) - --azureblob-endpoint string Endpoint for the service - --azureblob-key string Storage Account Key - --azureblob-list-chunk int Size of blob list (default 5000) - --azureblob-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s) - --azureblob-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool - --azureblob-msi-client-id string Object ID of the user-assigned MSI to use, if any - --azureblob-msi-mi-res-id string Azure resource ID of the user-assigned MSI to use, if any - --azureblob-msi-object-id string Object ID of the user-assigned MSI to use, if any - --azureblob-no-head-object If set, do not do HEAD before GET when getting objects - --azureblob-public-access string Public access level of a container: blob or container - --azureblob-sas-url string SAS URL for container level access only - --azureblob-service-principal-file string 
Path to file containing credentials for use with a service principal - --azureblob-upload-concurrency int Concurrency for multipart uploads (default 16) - --azureblob-upload-cutoff string Cutoff for switching to chunked upload (<= 256 MiB) (deprecated) - --azureblob-use-emulator Uses local storage emulator if provided as 'true' - --azureblob-use-msi Use a managed service identity to authenticate (only works in Azure) - --b2-account string Account ID or Application Key ID - --b2-chunk-size SizeSuffix Upload chunk size (default 96Mi) - --b2-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4Gi) - --b2-disable-checksum Disable checksums for large (> upload cutoff) files - --b2-download-auth-duration Duration Time before the authorization token will expire in s or suffix ms|s|m|h|d (default 1w) - --b2-download-url string Custom endpoint for downloads - --b2-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot) - --b2-endpoint string Endpoint for the service - --b2-hard-delete Permanently delete files on remote removal, otherwise hide files - --b2-key string Application Key - --b2-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s) - --b2-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool - --b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging - --b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi) - --b2-version-at Time Show file versions as they were at the specified time (default off) - --b2-versions Include old versions in directory listings - --box-access-token string Box App Primary Access Token - --box-auth-url string Auth server URL - --box-box-config-file string Box App config.json location - --box-box-sub-type string (default "user") - --box-client-id string OAuth Client Id - --box-client-secret string OAuth Client Secret - --box-commit-retries int Max number of times to 
try committing a multipart file (default 100) - --box-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot) - --box-list-chunk int Size of listing chunk 1-1000 (default 1000) - --box-owned-by string Only show items owned by the login (email address) passed in - --box-root-folder-id string Fill in for rclone to use a non root folder as its starting point - --box-token string OAuth Access Token as a JSON blob - --box-token-url string Token server url - --box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50 MiB) (default 50Mi) - --cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage (default 1m0s) - --cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming - --cache-chunk-path string Directory to cache chunk files (default "$HOME/.cache/rclone/cache-backend") - --cache-chunk-size SizeSuffix The size of a chunk (partial file data) (default 5Mi) - --cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk (default 10Gi) - --cache-db-path string Directory to store file structure metadata DB (default "$HOME/.cache/rclone/cache-backend") - --cache-db-purge Clear all the cached data for this remote on start - --cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s) - --cache-info-age Duration How long to cache file structure information (directory listings, file size, times, etc.) 
(default 6h0m0s) - --cache-plex-insecure string Skip all certificate verification when connecting to the Plex server - --cache-plex-password string The password of the Plex user (obscured) - --cache-plex-url string The URL of the Plex server - --cache-plex-username string The username of the Plex user - --cache-read-retries int How many times to retry a read from a cache storage (default 10) - --cache-remote string Remote to cache - --cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1) - --cache-tmp-upload-path string Directory to keep temporary files until they are uploaded - --cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s) - --cache-workers int How many workers should run in parallel to download chunks (default 4) - --cache-writes Cache file data on writes through the FS - --chunker-chunk-size SizeSuffix Files larger than chunk size will be split in chunks (default 2Gi) - --chunker-fail-hard Choose how chunker should handle files with missing or invalid chunks - --chunker-hash-type string Choose how chunker handles hash sums (default "md5") - --chunker-remote string Remote to chunk/unchunk - --combine-upstreams SpaceSepList Upstreams for combining - --compress-level int GZIP compression level (-2 to 9) (default -1) - --compress-mode string Compression mode (default "gzip") - --compress-ram-cache-limit SizeSuffix Some remotes don't allow the upload of files with unknown size (default 20Mi) - --compress-remote string Remote to compress - -L, --copy-links Follow symlinks and copy the pointed to item - --crypt-directory-name-encryption Option to either encrypt directory names or leave them intact (default true) - --crypt-filename-encoding string How to encode the encrypted filename to text string (default "base32") - --crypt-filename-encryption string How to encrypt the filenames (default "standard") - --crypt-no-data-encryption Option to either encrypt 
file data or leave it unencrypted - --crypt-password string Password or pass phrase for encryption (obscured) - --crypt-password2 string Password or pass phrase for salt (obscured) - --crypt-remote string Remote to encrypt/decrypt - --crypt-server-side-across-configs Allow server-side operations (e.g. copy) to work across different crypt configs - --crypt-show-mapping For all files listed show how the names encrypt - --drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded - --drive-allow-import-name-change Allow the filetype to change when uploading Google docs - --drive-auth-owner-only Only consider files owned by the authenticated user - --drive-auth-url string Auth server URL - --drive-chunk-size SizeSuffix Upload chunk size (default 8Mi) - --drive-client-id string Google Application Client Id - --drive-client-secret string OAuth Client Secret - --drive-copy-shortcut-content Server side copy contents of shortcuts instead of the shortcut - --drive-disable-http2 Disable drive using http2 (default true) - --drive-encoding MultiEncoder The encoding for the backend (default InvalidUtf8) - --drive-export-formats string Comma separated list of preferred formats for downloading Google docs (default "docx,xlsx,pptx,svg") - --drive-formats string Deprecated: See export_formats - --drive-impersonate string Impersonate this user when using a service account - --drive-import-formats string Comma separated list of preferred formats for uploading Google docs - --drive-keep-revision-forever Keep new head revision of each file forever - --drive-list-chunk int Size of listing chunk 100-1000, 0 to disable (default 1000) - --drive-pacer-burst int Number of API calls to allow without sleeping (default 100) - --drive-pacer-min-sleep Duration Minimum time to sleep between API calls (default 100ms) - --drive-resource-key string Resource key for accessing a link-shared file - --drive-root-folder-id string ID of the root folder - --drive-scope 
string Scope that rclone should use when requesting access from drive - --drive-server-side-across-configs Allow server-side operations (e.g. copy) to work across different drive configs - --drive-service-account-credentials string Service Account Credentials JSON blob - --drive-service-account-file string Service Account Credentials JSON file path - --drive-shared-with-me Only show files that are shared with me - --drive-size-as-quota Show sizes as storage quota usage, not actual size - --drive-skip-checksum-gphotos Skip MD5 checksum on Google photos and videos only - --drive-skip-dangling-shortcuts If set skip dangling shortcut files - --drive-skip-gdocs Skip google documents in all listings - --drive-skip-shortcuts If set skip shortcut files - --drive-starred-only Only show files that are starred - --drive-stop-on-download-limit Make download limit errors be fatal - --drive-stop-on-upload-limit Make upload limit errors be fatal - --drive-team-drive string ID of the Shared Drive (Team Drive) - --drive-token string OAuth Access Token as a JSON blob - --drive-token-url string Token server url - --drive-trashed-only Only show files that are in the trash - --drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8Mi) - --drive-use-created-date Use file created date instead of modified date - --drive-use-shared-date Use date file was shared instead of modified date - --drive-use-trash Send files to the trash instead of deleting permanently (default true) - --drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download (default off) - --dropbox-auth-url string Auth server URL - --dropbox-batch-commit-timeout Duration Max time to wait for a batch to finish committing (default 10m0s) - --dropbox-batch-mode string Upload file batching sync|async|off (default "sync") - --dropbox-batch-size int Max number of files in upload batch - --dropbox-batch-timeout Duration Max time to allow an idle upload batch before uploading 
(default 0s) - --dropbox-chunk-size SizeSuffix Upload chunk size (< 150Mi) (default 48Mi) - --dropbox-client-id string OAuth Client Id - --dropbox-client-secret string OAuth Client Secret - --dropbox-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot) - --dropbox-impersonate string Impersonate this user when using a business account - --dropbox-shared-files Instructs rclone to work on individual shared files - --dropbox-shared-folders Instructs rclone to work on shared folders - --dropbox-token string OAuth Access Token as a JSON blob - --dropbox-token-url string Token server url - --fichier-api-key string Your API Key, get it from https://1fichier.com/console/params.pl - --fichier-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot) - --fichier-file-password string If you want to download a shared file that is password protected, add this parameter (obscured) - --fichier-folder-password string If you want to list the files in a shared folder that is password protected, add this parameter (obscured) - --fichier-shared-folder string If you want to download a shared folder, add this parameter - --filefabric-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot) - --filefabric-permanent-token string Permanent Authentication Token - --filefabric-root-folder-id string ID of the root folder - --filefabric-token string Session Token - --filefabric-token-expiry string Token expiry time - --filefabric-url string URL of the Enterprise File Fabric to connect to - --filefabric-version string Version read from the file fabric - --ftp-ask-password Allow asking for FTP password when needed - --ftp-close-timeout Duration Maximum time to wait for a response to close (default 1m0s) - --ftp-concurrency int Maximum number of FTP simultaneous connections, 0 for unlimited - 
--ftp-disable-epsv Disable using EPSV even if server advertises support - --ftp-disable-mlsd Disable using MLSD even if server advertises support - --ftp-disable-tls13 Disable TLS 1.3 (workaround for FTP servers with buggy TLS) - --ftp-disable-utf8 Disable using UTF-8 even if server advertises support - --ftp-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,RightSpace,Dot) - --ftp-explicit-tls Use Explicit FTPS (FTP over TLS) - --ftp-force-list-hidden Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD - --ftp-host string FTP host to connect to - --ftp-idle-timeout Duration Max time before closing idle connections (default 1m0s) - --ftp-no-check-certificate Do not verify the TLS certificate of the server - --ftp-pass string FTP password (obscured) - --ftp-port int FTP port number (default 21) - --ftp-shut-timeout Duration Maximum time to wait for data connection closing status (default 1m0s) - --ftp-tls Use Implicit FTPS (FTP over TLS) - --ftp-tls-cache-size int Size of TLS session cache for all control and data connections (default 32) - --ftp-user string FTP username (default "$USER") - --ftp-writing-mdtm Use MDTM to set modification time (VsFtpd quirk) - --gcs-anonymous Access public buckets and objects without credentials - --gcs-auth-url string Auth server URL - --gcs-bucket-acl string Access Control List for new buckets - --gcs-bucket-policy-only Access checks should use bucket-level IAM policies - --gcs-client-id string OAuth Client Id - --gcs-client-secret string OAuth Client Secret - --gcs-decompress If set this will decompress gzip encoded objects - --gcs-encoding MultiEncoder The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot) - --gcs-endpoint string Endpoint for the service - --gcs-location string Location for the newly created buckets - --gcs-no-check-bucket If set, don't attempt to check the bucket exists or create it - --gcs-object-acl string Access Control List for new 
objects - --gcs-project-number string Project number - --gcs-service-account-file string Service Account Credentials JSON file path - --gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage - --gcs-token string OAuth Access Token as a JSON blob - --gcs-token-url string Token server url - --gphotos-auth-url string Auth server URL - --gphotos-client-id string OAuth Client Id - --gphotos-client-secret string OAuth Client Secret - --gphotos-encoding MultiEncoder The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot) - --gphotos-include-archived Also view and download archived media - --gphotos-read-only Set to make the Google Photos backend read only - --gphotos-read-size Set to read the size of media items - --gphotos-start-year int Year limits the photos to be downloaded to those which are uploaded after the given year (default 2000) - --gphotos-token string OAuth Access Token as a JSON blob - --gphotos-token-url string Token server url - --hasher-auto-size SizeSuffix Auto-update checksum for files smaller than this size (disabled by default) - --hasher-hashes CommaSepList Comma separated list of supported checksum types (default md5,sha1) - --hasher-max-age Duration Maximum time to keep checksums in cache (0 = no cache, off = cache forever) (default off) - --hasher-remote string Remote to cache checksums for (e.g. 
myRemote:path) - --hdfs-data-transfer-protection string Kerberos data transfer protection: authentication|integrity|privacy - --hdfs-encoding MultiEncoder The encoding for the backend (default Slash,Colon,Del,Ctl,InvalidUtf8,Dot) - --hdfs-namenode string Hadoop name node and port - --hdfs-service-principal-name string Kerberos service principal name for the namenode - --hdfs-username string Hadoop user name - --hidrive-auth-url string Auth server URL - --hidrive-chunk-size SizeSuffix Chunksize for chunked uploads (default 48Mi) - --hidrive-client-id string OAuth Client Id - --hidrive-client-secret string OAuth Client Secret - --hidrive-disable-fetching-member-count Do not fetch number of objects in directories unless it is absolutely necessary - --hidrive-encoding MultiEncoder The encoding for the backend (default Slash,Dot) - --hidrive-endpoint string Endpoint for the service (default "https://api.hidrive.strato.com/2.1") - --hidrive-root-prefix string The root/parent folder for all paths (default "/") - --hidrive-scope-access string Access permissions that rclone should use when requesting access from HiDrive (default "rw") - --hidrive-scope-role string User-level that rclone should use when requesting access from HiDrive (default "user") - --hidrive-token string OAuth Access Token as a JSON blob - --hidrive-token-url string Token server url - --hidrive-upload-concurrency int Concurrency for chunked uploads (default 4) - --hidrive-upload-cutoff SizeSuffix Cutoff/Threshold for chunked uploads (default 96Mi) - --http-headers CommaSepList Set HTTP headers for all transactions - --http-no-head Don't use HEAD requests - --http-no-slash Set this if the site doesn't end directories with / - --http-url string URL of HTTP host to connect to - --internetarchive-access-key-id string IAS3 Access Key - --internetarchive-disable-checksum Don't ask the server to test against MD5 checksum calculated by rclone (default true) - --internetarchive-encoding MultiEncoder The encoding 
for the backend (default Slash,LtGt,CrLf,Del,Ctl,InvalidUtf8,Dot) - --internetarchive-endpoint string IAS3 Endpoint (default "https://s3.us.archive.org") - --internetarchive-front-endpoint string Host of InternetArchive Frontend (default "https://archive.org") - --internetarchive-secret-access-key string IAS3 Secret Key (password) - --internetarchive-wait-archive Duration Timeout for waiting the server's processing tasks (specifically archive and book_op) to finish (default 0s) - --jottacloud-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot) - --jottacloud-hard-delete Delete files permanently rather than putting them into the trash - --jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required (default 10Mi) - --jottacloud-no-versions Avoid server side versioning by deleting files and recreating files instead of overwriting them - --jottacloud-trashed-only Only show files that are in the trash - --jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fail's (default 10Mi) - --koofr-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot) - --koofr-endpoint string The Koofr API endpoint to use - --koofr-mountid string Mount ID of the mount to use - --koofr-password string Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password) (obscured) - --koofr-provider string Choose your storage provider - --koofr-setmtime Does the backend support setting modification time (default true) - --koofr-user string Your user name - -l, --links Translate symlinks to/from regular files with a '.rclonelink' extension - --local-case-insensitive Force the filesystem to report itself as case insensitive - --local-case-sensitive Force the filesystem to report itself as case sensitive - --local-encoding MultiEncoder The 
encoding for the backend (default Slash,Dot) - --local-no-check-updated Don't check to see if the files change during upload - --local-no-preallocate Disable preallocation of disk space for transferred files - --local-no-set-modtime Disable setting modtime - --local-no-sparse Disable sparse files for multi-thread downloads - --local-nounc Disable UNC (long path names) conversion on Windows - --local-unicode-normalization Apply unicode NFC normalization to paths and filenames - --local-zero-size-links Assume the Stat size of links is zero (and read them instead) (deprecated) - --mailru-check-hash What should copy do if file checksum is mismatched or invalid (default true) - --mailru-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot) - --mailru-pass string Password (obscured) - --mailru-speedup-enable Skip full upload if there is another file with same data hash (default true) - --mailru-speedup-file-patterns string Comma separated list of file name patterns eligible for speedup (put by hash) (default "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf") - --mailru-speedup-max-disk SizeSuffix This option allows you to disable speedup (put by hash) for large files (default 3Gi) - --mailru-speedup-max-memory SizeSuffix Files larger than the size given below will always be hashed on disk (default 32Mi) - --mailru-user string User name (usually email) - --mega-debug Output more debug from Mega - --mega-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot) - --mega-hard-delete Delete files permanently rather than putting them into the trash - --mega-pass string Password (obscured) - --mega-user string User name - --netstorage-account string Set the NetStorage account name - --netstorage-host string Domain+path of NetStorage host to connect to - --netstorage-protocol string Select between HTTP or HTTPS protocol (default "https") - --netstorage-secret 
string Set the NetStorage account secret/G2O key for authentication (obscured) - -x, --one-file-system Don't cross filesystem boundaries (unix/macOS only) - --onedrive-access-scopes SpaceSepList Set scopes to be requested by rclone (default Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access) - --onedrive-auth-url string Auth server URL - --onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k (327,680 bytes) (default 10Mi) - --onedrive-client-id string OAuth Client Id - --onedrive-client-secret string OAuth Client Secret - --onedrive-drive-id string The ID of the drive to use - --onedrive-drive-type string The type of the drive (personal | business | documentLibrary) - --onedrive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot) - --onedrive-expose-onenote-files Set to make OneNote files show up in directory listings - --onedrive-link-password string Set the password for links created by the link command - --onedrive-link-scope string Set the scope of the links created by the link command (default "anonymous") - --onedrive-link-type string Set the type of the links created by the link command (default "view") - --onedrive-list-chunk int Size of listing chunk (default 1000) - --onedrive-no-versions Remove all versions on modifying operations - --onedrive-region string Choose national cloud region for OneDrive (default "global") - --onedrive-root-folder-id string ID of the root folder - --onedrive-server-side-across-configs Allow server-side operations (e.g. 
copy) to work across different onedrive configs - --onedrive-token string OAuth Access Token as a JSON blob - --onedrive-token-url string Token server url - --oos-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi) - --oos-compartment string Object storage compartment OCID - --oos-config-file string Path to OCI config file (default "~/.oci/config") - --oos-config-profile string Profile name inside the oci config file (default "Default") - --oos-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi) - --oos-copy-timeout Duration Timeout for copy (default 1m0s) - --oos-disable-checksum Don't store MD5 checksum with object metadata - --oos-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot) - --oos-endpoint string Endpoint for Object storage API - --oos-leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery - --oos-namespace string Object storage namespace - --oos-no-check-bucket If set, don't attempt to check the bucket exists or create it - --oos-provider string Choose your Auth Provider (default "env_auth") - --oos-region string Object storage Region - --oos-upload-concurrency int Concurrency for multipart uploads (default 10) - --oos-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi) - --opendrive-chunk-size SizeSuffix Files will be uploaded in chunks this size (default 10Mi) - --opendrive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot) - --opendrive-password string Password (obscured) - --opendrive-username string Username - --pcloud-auth-url string Auth server URL - --pcloud-client-id string OAuth Client Id - --pcloud-client-secret string OAuth Client Secret - --pcloud-encoding MultiEncoder The encoding for the backend (default 
Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot) - --pcloud-hostname string Hostname to connect to (default "api.pcloud.com") - --pcloud-password string Your pcloud password (obscured) - --pcloud-root-folder-id string Fill in for rclone to use a non root folder as its starting point (default "d0") - --pcloud-token string OAuth Access Token as a JSON blob - --pcloud-token-url string Token server url - --pcloud-username string Your pcloud username - --premiumizeme-encoding MultiEncoder The encoding for the backend (default Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot) - --putio-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot) - --qingstor-access-key-id string QingStor Access Key ID - --qingstor-chunk-size SizeSuffix Chunk size to use for uploading (default 4Mi) - --qingstor-connection-retries int Number of connection retries (default 3) - --qingstor-encoding MultiEncoder The encoding for the backend (default Slash,Ctl,InvalidUtf8) - --qingstor-endpoint string Enter an endpoint URL to connection QingStor API - --qingstor-env-auth Get QingStor credentials from runtime - --qingstor-secret-access-key string QingStor Secret Access Key (password) - --qingstor-upload-concurrency int Concurrency for multipart uploads (default 1) - --qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi) - --qingstor-zone string Zone to connect to - --s3-access-key-id string AWS Access Key ID - --s3-acl string Canned ACL used when creating buckets and storing or copying objects - --s3-bucket-acl string Canned ACL used when creating buckets - --s3-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi) - --s3-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi) - --s3-decompress If set this will decompress gzip encoded objects - --s3-disable-checksum Don't store MD5 checksum with object metadata - --s3-disable-http2 Disable usage of http2 for S3 backends - 
--s3-download-url string Custom endpoint for downloads - --s3-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot) - --s3-endpoint string Endpoint for S3 API - --s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars) - --s3-force-path-style If true use path style access if false use virtual hosted style (default true) - --s3-leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery - --s3-list-chunk int Size of listing chunk (response list for each ListObject S3 request) (default 1000) - --s3-list-url-encode Tristate Whether to url encode listings: true/false/unset (default unset) - --s3-list-version int Version of ListObjects to use: 1,2 or 0 for auto - --s3-location-constraint string Location constraint - must be set to match the Region - --s3-max-upload-parts int Maximum number of parts in a multipart upload (default 10000) - --s3-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s) - --s3-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool - --s3-no-check-bucket If set, don't attempt to check the bucket exists or create it - --s3-no-head If set, don't HEAD uploaded objects to check integrity - --s3-no-head-object If set, do not do HEAD before GET when getting objects - --s3-no-system-metadata Suppress setting and reading of system metadata - --s3-profile string Profile to use in the shared credentials file - --s3-provider string Choose your S3 provider - --s3-region string Region to connect to - --s3-requester-pays Enables requester pays option when interacting with S3 bucket - --s3-secret-access-key string AWS Secret Access Key (password) - --s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3 - --s3-session-token string An AWS session token - --s3-shared-credentials-file string Path 
to the shared credentials file - --s3-sse-customer-algorithm string If using SSE-C, the server-side encryption algorithm used when storing this object in S3 - --s3-sse-customer-key string To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data - --s3-sse-customer-key-base64 string If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data - --s3-sse-customer-key-md5 string If using SSE-C you may provide the secret encryption key MD5 checksum (optional) - --s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key - --s3-storage-class string The storage class to use when storing new objects in S3 - --s3-upload-concurrency int Concurrency for multipart uploads (default 4) - --s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi) - --s3-use-accelerate-endpoint If true use the AWS S3 accelerated endpoint - --s3-use-multipart-etag Tristate Whether to use ETag in multipart uploads for verification (default unset) - --s3-use-presigned-request Whether to use a presigned request or PutObject for single part uploads - --s3-v2-auth If true use v2 authentication - --s3-version-at Time Show file versions as they were at the specified time (default off) - --s3-versions Include old versions in directory listings - --seafile-2fa Two-factor authentication ('true' if the account has 2FA enabled) - --seafile-create-library Should rclone create a library if it doesn't exist - --seafile-encoding MultiEncoder The encoding for the backend (default Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8) - --seafile-library string Name of the library - --seafile-library-key string Library password (for encrypted libraries only) (obscured) - --seafile-pass string Password (obscured) - --seafile-url string URL of seafile host to connect to - --seafile-user string User name (usually email address) - --sftp-ask-password Allow asking for SFTP password when needed - 
--sftp-chunk-size SizeSuffix Upload and download chunk size (default 32Ki) - --sftp-concurrency int The maximum number of outstanding requests for one file (default 64) - --sftp-disable-concurrent-reads If set don't use concurrent reads - --sftp-disable-concurrent-writes If set don't use concurrent writes - --sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available - --sftp-host string SSH host to connect to - --sftp-idle-timeout Duration Max time before closing idle connections (default 1m0s) - --sftp-key-file string Path to PEM-encoded private key file - --sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file (obscured) - --sftp-key-pem string Raw PEM-encoded private key - --sftp-key-use-agent When set forces the usage of the ssh-agent - --sftp-known-hosts-file string Optional path to known_hosts file - --sftp-md5sum-command string The command used to read md5 hashes - --sftp-pass string SSH password, leave blank to use ssh-agent (obscured) - --sftp-path-override string Override path used by SSH shell commands - --sftp-port int SSH port number (default 22) - --sftp-pubkey-file string Optional path to public key file - --sftp-server-command string Specifies the path or command to run a sftp server on the remote host - --sftp-set-env SpaceSepList Environment variables to pass to sftp and commands - --sftp-set-modtime Set the modified time on the remote if set (default true) - --sftp-sha1sum-command string The command used to read sha1 hashes - --sftp-shell-type string The type of SSH shell on remote server, if any - --sftp-skip-links Set to skip any symlinks and any other non regular files - --sftp-subsystem string Specifies the SSH2 subsystem on the remote host (default "sftp") - --sftp-use-fstat If set use fstat instead of stat - --sftp-use-insecure-cipher Enable the use of insecure ciphers and key exchange methods - --sftp-user string SSH username (default "$USER") - 
--sharefile-chunk-size SizeSuffix Upload chunk size (default 64Mi) - --sharefile-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,LeftPeriod,RightSpace,RightPeriod,InvalidUtf8,Dot) - --sharefile-endpoint string Endpoint for API calls - --sharefile-root-folder-id string ID of the root folder - --sharefile-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (default 128Mi) - --sia-api-password string Sia Daemon API Password (obscured) - --sia-api-url string Sia daemon API URL, like http://sia.daemon.host:9980 (default "http://127.0.0.1:9980") - --sia-encoding MultiEncoder The encoding for the backend (default Slash,Question,Hash,Percent,Del,Ctl,InvalidUtf8,Dot) - --sia-user-agent string Siad User Agent (default "Sia-Agent") - --skip-links Don't warn about skipped symlinks - --smb-case-insensitive Whether the server is configured to be case-insensitive (default true) - --smb-domain string Domain name for NTLM authentication (default "WORKGROUP") - --smb-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot) - --smb-hide-special-share Hide special shares (e.g. 
print$) which users aren't supposed to access (default true) - --smb-host string SMB server hostname to connect to - --smb-idle-timeout Duration Max time before closing idle connections (default 1m0s) - --smb-pass string SMB password (obscured) - --smb-port int SMB port number (default 445) - --smb-user string SMB username (default "$USER") - --storj-access-grant string Access grant - --storj-api-key string API key - --storj-passphrase string Encryption passphrase - --storj-provider string Choose an authentication method (default "existing") - --storj-satellite-address string Satellite address (default "us-central-1.storj.io") - --sugarsync-access-key-id string Sugarsync Access Key ID - --sugarsync-app-id string Sugarsync App ID - --sugarsync-authorization string Sugarsync authorization - --sugarsync-authorization-expiry string Sugarsync authorization expiry - --sugarsync-deleted-id string Sugarsync deleted folder id - --sugarsync-encoding MultiEncoder The encoding for the backend (default Slash,Ctl,InvalidUtf8,Dot) - --sugarsync-hard-delete Permanently delete files if true - --sugarsync-private-access-key string Sugarsync Private Access Key - --sugarsync-refresh-token string Sugarsync refresh token - --sugarsync-root-id string Sugarsync root id - --sugarsync-user string Sugarsync user - --swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID) - --swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME) - --swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET) - --swift-auth string Authentication URL for server (OS_AUTH_URL) - --swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN) - --swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION) - --swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments 
container (default 5Gi) - --swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME) - --swift-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8) - --swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public") - --swift-env-auth Get swift credentials from environment variables in standard OpenStack form - --swift-key string API key or password (OS_PASSWORD) - --swift-leave-parts-on-error If true avoid calling abort upload on a failure - --swift-no-chunk Don't chunk files during streaming upload - --swift-no-large-objects Disable support for static and dynamic large objects - --swift-region string Region name - optional (OS_REGION_NAME) - --swift-storage-policy string The storage policy to use when creating a new container - --swift-storage-url string Storage URL - optional (OS_STORAGE_URL) - --swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME) - --swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME) - --swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID) - --swift-user string User name to log in (OS_USERNAME) - --swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID) - --union-action-policy string Policy to choose upstream on ACTION category (default "epall") - --union-cache-time int Cache time of usage and free space (in seconds) (default 120) - --union-create-policy string Policy to choose upstream on CREATE category (default "epmfs") - --union-min-free-space SizeSuffix Minimum viable free space for lfs/eplfs policies (default 1Gi) - --union-search-policy string Policy to choose upstream on SEARCH category (default "ff") - --union-upstreams string List of space separated upstreams - --uptobox-access-token string Your access token - 
--uptobox-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot) - --webdav-bearer-token string Bearer token instead of user/pass (e.g. a Macaroon) - --webdav-bearer-token-command string Command to run to get a bearer token - --webdav-encoding string The encoding for the backend - --webdav-headers CommaSepList Set HTTP headers for all transactions - --webdav-pass string Password (obscured) - --webdav-url string URL of http host to connect to - --webdav-user string User name - --webdav-vendor string Name of the WebDAV site/service/software you are using - --yandex-auth-url string Auth server URL - --yandex-client-id string OAuth Client Id - --yandex-client-secret string OAuth Client Secret - --yandex-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot) - --yandex-hard-delete Delete files permanently rather than putting them into the trash - --yandex-token string OAuth Access Token as a JSON blob - --yandex-token-url string Token server url - --zoho-auth-url string Auth server URL - --zoho-client-id string OAuth Client Id - --zoho-client-secret string OAuth Client Secret - --zoho-encoding MultiEncoder The encoding for the backend (default Del,Ctl,InvalidUtf8) - --zoho-region string Zoho region to connect to - --zoho-token string OAuth Access Token as a JSON blob - --zoho-token-url string Token server url + --acd-auth-url string Auth server URL + --acd-client-id string OAuth Client Id + --acd-client-secret string OAuth Client Secret + --acd-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot) + --acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink (default 9Gi) + --acd-token string OAuth Access Token as a JSON blob + --acd-token-url string Token server url + --acd-upload-wait-per-gb Duration Additional time per GiB to wait after a failed complete upload to see if it appears (default 3m0s) + 
--alias-remote string Remote or path to alias + --azureblob-access-tier string Access tier of blob: hot, cool or archive + --azureblob-account string Azure Storage Account Name + --azureblob-archive-tier-delete Delete archive tier blobs before overwriting + --azureblob-chunk-size SizeSuffix Upload chunk size (default 4Mi) + --azureblob-client-certificate-password string Password for the certificate file (optional) (obscured) + --azureblob-client-certificate-path string Path to a PEM or PKCS12 certificate file including the private key + --azureblob-client-id string The ID of the client in use + --azureblob-client-secret string One of the service principal's client secrets + --azureblob-client-send-certificate-chain Send the certificate chain when using certificate auth + --azureblob-disable-checksum Don't store MD5 checksum with object metadata + --azureblob-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8) + --azureblob-endpoint string Endpoint for the service + --azureblob-env-auth Read credentials from runtime (environment variables, CLI or MSI) + --azureblob-key string Storage Account Shared Key + --azureblob-list-chunk int Size of blob list (default 5000) + --azureblob-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s) + --azureblob-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool + --azureblob-msi-client-id string Object ID of the user-assigned MSI to use, if any + --azureblob-msi-mi-res-id string Azure resource ID of the user-assigned MSI to use, if any + --azureblob-msi-object-id string Object ID of the user-assigned MSI to use, if any + --azureblob-no-check-container If set, don't attempt to check the container exists or create it + --azureblob-no-head-object If set, do not do HEAD before GET when getting objects + --azureblob-password string The user's password (obscured) + --azureblob-public-access string Public access 
level of a container: blob or container + --azureblob-sas-url string SAS URL for container level access only + --azureblob-service-principal-file string Path to file containing credentials for use with a service principal + --azureblob-tenant string ID of the service principal's tenant. Also called its directory ID + --azureblob-upload-concurrency int Concurrency for multipart uploads (default 16) + --azureblob-upload-cutoff string Cutoff for switching to chunked upload (<= 256 MiB) (deprecated) + --azureblob-use-emulator Uses local storage emulator if provided as 'true' + --azureblob-use-msi Use a managed service identity to authenticate (only works in Azure) + --azureblob-username string User name (usually an email address) + --b2-account string Account ID or Application Key ID + --b2-chunk-size SizeSuffix Upload chunk size (default 96Mi) + --b2-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4Gi) + --b2-disable-checksum Disable checksums for large (> upload cutoff) files + --b2-download-auth-duration Duration Time before the authorization token will expire in s or suffix ms|s|m|h|d (default 1w) + --b2-download-url string Custom endpoint for downloads + --b2-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot) + --b2-endpoint string Endpoint for the service + --b2-hard-delete Permanently delete files on remote removal, otherwise hide files + --b2-key string Application Key + --b2-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s) + --b2-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool + --b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging + --b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi) + --b2-version-at Time Show file versions as they were at the specified time (default off) + --b2-versions Include old versions in directory listings + --box-access-token 
string Box App Primary Access Token + --box-auth-url string Auth server URL + --box-box-config-file string Box App config.json location + --box-box-sub-type string (default "user") + --box-client-id string OAuth Client Id + --box-client-secret string OAuth Client Secret + --box-commit-retries int Max number of times to try committing a multipart file (default 100) + --box-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot) + --box-list-chunk int Size of listing chunk 1-1000 (default 1000) + --box-owned-by string Only show items owned by the login (email address) passed in + --box-root-folder-id string Fill in for rclone to use a non root folder as its starting point + --box-token string OAuth Access Token as a JSON blob + --box-token-url string Token server url + --box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50 MiB) (default 50Mi) + --cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage (default 1m0s) + --cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming + --cache-chunk-path string Directory to cache chunk files (default "$HOME/.cache/rclone/cache-backend") + --cache-chunk-size SizeSuffix The size of a chunk (partial file data) (default 5Mi) + --cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk (default 10Gi) + --cache-db-path string Directory to store file structure metadata DB (default "$HOME/.cache/rclone/cache-backend") + --cache-db-purge Clear all the cached data for this remote on start + --cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s) + --cache-info-age Duration How long to cache file structure information (directory listings, file size, times, etc.) 
(default 6h0m0s) + --cache-plex-insecure string Skip all certificate verification when connecting to the Plex server + --cache-plex-password string The password of the Plex user (obscured) + --cache-plex-url string The URL of the Plex server + --cache-plex-username string The username of the Plex user + --cache-read-retries int How many times to retry a read from a cache storage (default 10) + --cache-remote string Remote to cache + --cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1) + --cache-tmp-upload-path string Directory to keep temporary files until they are uploaded + --cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s) + --cache-workers int How many workers should run in parallel to download chunks (default 4) + --cache-writes Cache file data on writes through the FS + --chunker-chunk-size SizeSuffix Files larger than chunk size will be split in chunks (default 2Gi) + --chunker-fail-hard Choose how chunker should handle files with missing or invalid chunks + --chunker-hash-type string Choose how chunker handles hash sums (default "md5") + --chunker-remote string Remote to chunk/unchunk + --combine-upstreams SpaceSepList Upstreams for combining + --compress-level int GZIP compression level (-2 to 9) (default -1) + --compress-mode string Compression mode (default "gzip") + --compress-ram-cache-limit SizeSuffix Some remotes don't allow the upload of files with unknown size (default 20Mi) + --compress-remote string Remote to compress + -L, --copy-links Follow symlinks and copy the pointed to item + --crypt-directory-name-encryption Option to either encrypt directory names or leave them intact (default true) + --crypt-filename-encoding string How to encode the encrypted filename to text string (default "base32") + --crypt-filename-encryption string How to encrypt the filenames (default "standard") + --crypt-no-data-encryption Option to either encrypt 
file data or leave it unencrypted + --crypt-password string Password or pass phrase for encryption (obscured) + --crypt-password2 string Password or pass phrase for salt (obscured) + --crypt-remote string Remote to encrypt/decrypt + --crypt-server-side-across-configs Allow server-side operations (e.g. copy) to work across different crypt configs + --crypt-show-mapping For all files listed show how the names encrypt + --drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded + --drive-allow-import-name-change Allow the filetype to change when uploading Google docs + --drive-auth-owner-only Only consider files owned by the authenticated user + --drive-auth-url string Auth server URL + --drive-chunk-size SizeSuffix Upload chunk size (default 8Mi) + --drive-client-id string Google Application Client Id + --drive-client-secret string OAuth Client Secret + --drive-copy-shortcut-content Server side copy contents of shortcuts instead of the shortcut + --drive-disable-http2 Disable drive using http2 (default true) + --drive-encoding MultiEncoder The encoding for the backend (default InvalidUtf8) + --drive-export-formats string Comma separated list of preferred formats for downloading Google docs (default "docx,xlsx,pptx,svg") + --drive-formats string Deprecated: See export_formats + --drive-impersonate string Impersonate this user when using a service account + --drive-import-formats string Comma separated list of preferred formats for uploading Google docs + --drive-keep-revision-forever Keep new head revision of each file forever + --drive-list-chunk int Size of listing chunk 100-1000, 0 to disable (default 1000) + --drive-pacer-burst int Number of API calls to allow without sleeping (default 100) + --drive-pacer-min-sleep Duration Minimum time to sleep between API calls (default 100ms) + --drive-resource-key string Resource key for accessing a link-shared file + --drive-root-folder-id string ID of the root folder + --drive-scope 
string Scope that rclone should use when requesting access from drive + --drive-server-side-across-configs Allow server-side operations (e.g. copy) to work across different drive configs + --drive-service-account-credentials string Service Account Credentials JSON blob + --drive-service-account-file string Service Account Credentials JSON file path + --drive-shared-with-me Only show files that are shared with me + --drive-size-as-quota Show sizes as storage quota usage, not actual size + --drive-skip-checksum-gphotos Skip MD5 checksum on Google photos and videos only + --drive-skip-dangling-shortcuts If set skip dangling shortcut files + --drive-skip-gdocs Skip google documents in all listings + --drive-skip-shortcuts If set skip shortcut files + --drive-starred-only Only show files that are starred + --drive-stop-on-download-limit Make download limit errors be fatal + --drive-stop-on-upload-limit Make upload limit errors be fatal + --drive-team-drive string ID of the Shared Drive (Team Drive) + --drive-token string OAuth Access Token as a JSON blob + --drive-token-url string Token server url + --drive-trashed-only Only show files that are in the trash + --drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8Mi) + --drive-use-created-date Use file created date instead of modified date + --drive-use-shared-date Use date file was shared instead of modified date + --drive-use-trash Send files to the trash instead of deleting permanently (default true) + --drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download (default off) + --dropbox-auth-url string Auth server URL + --dropbox-batch-commit-timeout Duration Max time to wait for a batch to finish committing (default 10m0s) + --dropbox-batch-mode string Upload file batching sync|async|off (default "sync") + --dropbox-batch-size int Max number of files in upload batch + --dropbox-batch-timeout Duration Max time to allow an idle upload batch before uploading 
(default 0s) + --dropbox-chunk-size SizeSuffix Upload chunk size (< 150Mi) (default 48Mi) + --dropbox-client-id string OAuth Client Id + --dropbox-client-secret string OAuth Client Secret + --dropbox-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot) + --dropbox-impersonate string Impersonate this user when using a business account + --dropbox-shared-files Instructs rclone to work on individual shared files + --dropbox-shared-folders Instructs rclone to work on shared folders + --dropbox-token string OAuth Access Token as a JSON blob + --dropbox-token-url string Token server url + --fichier-api-key string Your API Key, get it from https://1fichier.com/console/params.pl + --fichier-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot) + --fichier-file-password string If you want to download a shared file that is password protected, add this parameter (obscured) + --fichier-folder-password string If you want to list the files in a shared folder that is password protected, add this parameter (obscured) + --fichier-shared-folder string If you want to download a shared folder, add this parameter + --filefabric-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot) + --filefabric-permanent-token string Permanent Authentication Token + --filefabric-root-folder-id string ID of the root folder + --filefabric-token string Session Token + --filefabric-token-expiry string Token expiry time + --filefabric-url string URL of the Enterprise File Fabric to connect to + --filefabric-version string Version read from the file fabric + --ftp-ask-password Allow asking for FTP password when needed + --ftp-close-timeout Duration Maximum time to wait for a response to close (default 1m0s) + --ftp-concurrency int Maximum number of FTP simultaneous connections, 0 for unlimited + 
--ftp-disable-epsv Disable using EPSV even if server advertises support + --ftp-disable-mlsd Disable using MLSD even if server advertises support + --ftp-disable-tls13 Disable TLS 1.3 (workaround for FTP servers with buggy TLS) + --ftp-disable-utf8 Disable using UTF-8 even if server advertises support + --ftp-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,RightSpace,Dot) + --ftp-explicit-tls Use Explicit FTPS (FTP over TLS) + --ftp-force-list-hidden Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD + --ftp-host string FTP host to connect to + --ftp-idle-timeout Duration Max time before closing idle connections (default 1m0s) + --ftp-no-check-certificate Do not verify the TLS certificate of the server + --ftp-pass string FTP password (obscured) + --ftp-port int FTP port number (default 21) + --ftp-shut-timeout Duration Maximum time to wait for data connection closing status (default 1m0s) + --ftp-tls Use Implicit FTPS (FTP over TLS) + --ftp-tls-cache-size int Size of TLS session cache for all control and data connections (default 32) + --ftp-user string FTP username (default "$USER") + --ftp-writing-mdtm Use MDTM to set modification time (VsFtpd quirk) + --gcs-anonymous Access public buckets and objects without credentials + --gcs-auth-url string Auth server URL + --gcs-bucket-acl string Access Control List for new buckets + --gcs-bucket-policy-only Access checks should use bucket-level IAM policies + --gcs-client-id string OAuth Client Id + --gcs-client-secret string OAuth Client Secret + --gcs-decompress If set this will decompress gzip encoded objects + --gcs-encoding MultiEncoder The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot) + --gcs-endpoint string Endpoint for the service + --gcs-location string Location for the newly created buckets + --gcs-no-check-bucket If set, don't attempt to check the bucket exists or create it + --gcs-object-acl string Access Control List for new 
objects + --gcs-project-number string Project number + --gcs-service-account-file string Service Account Credentials JSON file path + --gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage + --gcs-token string OAuth Access Token as a JSON blob + --gcs-token-url string Token server url + --gphotos-auth-url string Auth server URL + --gphotos-client-id string OAuth Client Id + --gphotos-client-secret string OAuth Client Secret + --gphotos-encoding MultiEncoder The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot) + --gphotos-include-archived Also view and download archived media + --gphotos-read-only Set to make the Google Photos backend read only + --gphotos-read-size Set to read the size of media items + --gphotos-start-year int Year limits the photos to be downloaded to those which are uploaded after the given year (default 2000) + --gphotos-token string OAuth Access Token as a JSON blob + --gphotos-token-url string Token server url + --hasher-auto-size SizeSuffix Auto-update checksum for files smaller than this size (disabled by default) + --hasher-hashes CommaSepList Comma separated list of supported checksum types (default md5,sha1) + --hasher-max-age Duration Maximum time to keep checksums in cache (0 = no cache, off = cache forever) (default off) + --hasher-remote string Remote to cache checksums for (e.g. 
myRemote:path) + --hdfs-data-transfer-protection string Kerberos data transfer protection: authentication|integrity|privacy + --hdfs-encoding MultiEncoder The encoding for the backend (default Slash,Colon,Del,Ctl,InvalidUtf8,Dot) + --hdfs-namenode string Hadoop name node and port + --hdfs-service-principal-name string Kerberos service principal name for the namenode + --hdfs-username string Hadoop user name + --hidrive-auth-url string Auth server URL + --hidrive-chunk-size SizeSuffix Chunksize for chunked uploads (default 48Mi) + --hidrive-client-id string OAuth Client Id + --hidrive-client-secret string OAuth Client Secret + --hidrive-disable-fetching-member-count Do not fetch number of objects in directories unless it is absolutely necessary + --hidrive-encoding MultiEncoder The encoding for the backend (default Slash,Dot) + --hidrive-endpoint string Endpoint for the service (default "https://api.hidrive.strato.com/2.1") + --hidrive-root-prefix string The root/parent folder for all paths (default "/") + --hidrive-scope-access string Access permissions that rclone should use when requesting access from HiDrive (default "rw") + --hidrive-scope-role string User-level that rclone should use when requesting access from HiDrive (default "user") + --hidrive-token string OAuth Access Token as a JSON blob + --hidrive-token-url string Token server url + --hidrive-upload-concurrency int Concurrency for chunked uploads (default 4) + --hidrive-upload-cutoff SizeSuffix Cutoff/Threshold for chunked uploads (default 96Mi) + --http-headers CommaSepList Set HTTP headers for all transactions + --http-no-head Don't use HEAD requests + --http-no-slash Set this if the site doesn't end directories with / + --http-url string URL of HTTP host to connect to + --internetarchive-access-key-id string IAS3 Access Key + --internetarchive-disable-checksum Don't ask the server to test against MD5 checksum calculated by rclone (default true) + --internetarchive-encoding MultiEncoder The encoding 
for the backend (default Slash,LtGt,CrLf,Del,Ctl,InvalidUtf8,Dot) + --internetarchive-endpoint string IAS3 Endpoint (default "https://s3.us.archive.org") + --internetarchive-front-endpoint string Host of InternetArchive Frontend (default "https://archive.org") + --internetarchive-secret-access-key string IAS3 Secret Key (password) + --internetarchive-wait-archive Duration Timeout for waiting the server's processing tasks (specifically archive and book_op) to finish (default 0s) + --jottacloud-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot) + --jottacloud-hard-delete Delete files permanently rather than putting them into the trash + --jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required (default 10Mi) + --jottacloud-no-versions Avoid server side versioning by deleting files and recreating files instead of overwriting them + --jottacloud-trashed-only Only show files that are in the trash + --jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fail's (default 10Mi) + --koofr-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot) + --koofr-endpoint string The Koofr API endpoint to use + --koofr-mountid string Mount ID of the mount to use + --koofr-password string Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password) (obscured) + --koofr-provider string Choose your storage provider + --koofr-setmtime Does the backend support setting modification time (default true) + --koofr-user string Your user name + -l, --links Translate symlinks to/from regular files with a '.rclonelink' extension + --local-case-insensitive Force the filesystem to report itself as case insensitive + --local-case-sensitive Force the filesystem to report itself as case sensitive + --local-encoding MultiEncoder The 
encoding for the backend (default Slash,Dot) + --local-no-check-updated Don't check to see if the files change during upload + --local-no-preallocate Disable preallocation of disk space for transferred files + --local-no-set-modtime Disable setting modtime + --local-no-sparse Disable sparse files for multi-thread downloads + --local-nounc Disable UNC (long path names) conversion on Windows + --local-unicode-normalization Apply unicode NFC normalization to paths and filenames + --local-zero-size-links Assume the Stat size of links is zero (and read them instead) (deprecated) + --mailru-check-hash What should copy do if file checksum is mismatched or invalid (default true) + --mailru-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot) + --mailru-pass string Password (obscured) + --mailru-speedup-enable Skip full upload if there is another file with same data hash (default true) + --mailru-speedup-file-patterns string Comma separated list of file name patterns eligible for speedup (put by hash) (default "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf") + --mailru-speedup-max-disk SizeSuffix This option allows you to disable speedup (put by hash) for large files (default 3Gi) + --mailru-speedup-max-memory SizeSuffix Files larger than the size given below will always be hashed on disk (default 32Mi) + --mailru-user string User name (usually email) + --mega-debug Output more debug from Mega + --mega-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot) + --mega-hard-delete Delete files permanently rather than putting them into the trash + --mega-pass string Password (obscured) + --mega-user string User name + --netstorage-account string Set the NetStorage account name + --netstorage-host string Domain+path of NetStorage host to connect to + --netstorage-protocol string Select between HTTP or HTTPS protocol (default "https") + --netstorage-secret 
string Set the NetStorage account secret/G2O key for authentication (obscured) + -x, --one-file-system Don't cross filesystem boundaries (unix/macOS only) + --onedrive-access-scopes SpaceSepList Set scopes to be requested by rclone (default Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access) + --onedrive-auth-url string Auth server URL + --onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k (327,680 bytes) (default 10Mi) + --onedrive-client-id string OAuth Client Id + --onedrive-client-secret string OAuth Client Secret + --onedrive-drive-id string The ID of the drive to use + --onedrive-drive-type string The type of the drive (personal | business | documentLibrary) + --onedrive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot) + --onedrive-expose-onenote-files Set to make OneNote files show up in directory listings + --onedrive-link-password string Set the password for links created by the link command + --onedrive-link-scope string Set the scope of the links created by the link command (default "anonymous") + --onedrive-link-type string Set the type of the links created by the link command (default "view") + --onedrive-list-chunk int Size of listing chunk (default 1000) + --onedrive-no-versions Remove all versions on modifying operations + --onedrive-region string Choose national cloud region for OneDrive (default "global") + --onedrive-root-folder-id string ID of the root folder + --onedrive-server-side-across-configs Allow server-side operations (e.g. 
copy) to work across different onedrive configs + --onedrive-token string OAuth Access Token as a JSON blob + --onedrive-token-url string Token server url + --oos-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi) + --oos-compartment string Object storage compartment OCID + --oos-config-file string Path to OCI config file (default "~/.oci/config") + --oos-config-profile string Profile name inside the oci config file (default "Default") + --oos-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi) + --oos-copy-timeout Duration Timeout for copy (default 1m0s) + --oos-disable-checksum Don't store MD5 checksum with object metadata + --oos-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot) + --oos-endpoint string Endpoint for Object storage API + --oos-leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery + --oos-namespace string Object storage namespace + --oos-no-check-bucket If set, don't attempt to check the bucket exists or create it + --oos-provider string Choose your Auth Provider (default "env_auth") + --oos-region string Object storage Region + --oos-upload-concurrency int Concurrency for multipart uploads (default 10) + --oos-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi) + --opendrive-chunk-size SizeSuffix Files will be uploaded in chunks this size (default 10Mi) + --opendrive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot) + --opendrive-password string Password (obscured) + --opendrive-username string Username + --pcloud-auth-url string Auth server URL + --pcloud-client-id string OAuth Client Id + --pcloud-client-secret string OAuth Client Secret + --pcloud-encoding MultiEncoder The encoding for the backend (default 
Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot) + --pcloud-hostname string Hostname to connect to (default "api.pcloud.com") + --pcloud-password string Your pcloud password (obscured) + --pcloud-root-folder-id string Fill in for rclone to use a non root folder as its starting point (default "d0") + --pcloud-token string OAuth Access Token as a JSON blob + --pcloud-token-url string Token server url + --pcloud-username string Your pcloud username + --premiumizeme-encoding MultiEncoder The encoding for the backend (default Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot) + --putio-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot) + --qingstor-access-key-id string QingStor Access Key ID + --qingstor-chunk-size SizeSuffix Chunk size to use for uploading (default 4Mi) + --qingstor-connection-retries int Number of connection retries (default 3) + --qingstor-encoding MultiEncoder The encoding for the backend (default Slash,Ctl,InvalidUtf8) + --qingstor-endpoint string Enter an endpoint URL to connection QingStor API + --qingstor-env-auth Get QingStor credentials from runtime + --qingstor-secret-access-key string QingStor Secret Access Key (password) + --qingstor-upload-concurrency int Concurrency for multipart uploads (default 1) + --qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi) + --qingstor-zone string Zone to connect to + --s3-access-key-id string AWS Access Key ID + --s3-acl string Canned ACL used when creating buckets and storing or copying objects + --s3-bucket-acl string Canned ACL used when creating buckets + --s3-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi) + --s3-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi) + --s3-decompress If set this will decompress gzip encoded objects + --s3-disable-checksum Don't store MD5 checksum with object metadata + --s3-disable-http2 Disable usage of http2 for S3 backends + 
--s3-download-url string Custom endpoint for downloads + --s3-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot) + --s3-endpoint string Endpoint for S3 API + --s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars) + --s3-force-path-style If true use path style access if false use virtual hosted style (default true) + --s3-leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery + --s3-list-chunk int Size of listing chunk (response list for each ListObject S3 request) (default 1000) + --s3-list-url-encode Tristate Whether to url encode listings: true/false/unset (default unset) + --s3-list-version int Version of ListObjects to use: 1,2 or 0 for auto + --s3-location-constraint string Location constraint - must be set to match the Region + --s3-max-upload-parts int Maximum number of parts in a multipart upload (default 10000) + --s3-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s) + --s3-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool + --s3-might-gzip Tristate Set this if the backend might gzip objects (default unset) + --s3-no-check-bucket If set, don't attempt to check the bucket exists or create it + --s3-no-head If set, don't HEAD uploaded objects to check integrity + --s3-no-head-object If set, do not do HEAD before GET when getting objects + --s3-no-system-metadata Suppress setting and reading of system metadata + --s3-profile string Profile to use in the shared credentials file + --s3-provider string Choose your S3 provider + --s3-region string Region to connect to + --s3-requester-pays Enables requester pays option when interacting with S3 bucket + --s3-secret-access-key string AWS Secret Access Key (password) + --s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3 + 
--s3-session-token string An AWS session token + --s3-shared-credentials-file string Path to the shared credentials file + --s3-sse-customer-algorithm string If using SSE-C, the server-side encryption algorithm used when storing this object in S3 + --s3-sse-customer-key string To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data + --s3-sse-customer-key-base64 string If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data + --s3-sse-customer-key-md5 string If using SSE-C you may provide the secret encryption key MD5 checksum (optional) + --s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key + --s3-storage-class string The storage class to use when storing new objects in S3 + --s3-upload-concurrency int Concurrency for multipart uploads (default 4) + --s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi) + --s3-use-accelerate-endpoint If true use the AWS S3 accelerated endpoint + --s3-use-multipart-etag Tristate Whether to use ETag in multipart uploads for verification (default unset) + --s3-use-presigned-request Whether to use a presigned request or PutObject for single part uploads + --s3-v2-auth If true use v2 authentication + --s3-version-at Time Show file versions as they were at the specified time (default off) + --s3-versions Include old versions in directory listings + --seafile-2fa Two-factor authentication ('true' if the account has 2FA enabled) + --seafile-create-library Should rclone create a library if it doesn't exist + --seafile-encoding MultiEncoder The encoding for the backend (default Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8) + --seafile-library string Name of the library + --seafile-library-key string Library password (for encrypted libraries only) (obscured) + --seafile-pass string Password (obscured) + --seafile-url string URL of seafile host to connect to + --seafile-user string User name (usually 
email address) + --sftp-ask-password Allow asking for SFTP password when needed + --sftp-chunk-size SizeSuffix Upload and download chunk size (default 32Ki) + --sftp-ciphers SpaceSepList Space separated list of ciphers to be used for session encryption, ordered by preference + --sftp-concurrency int The maximum number of outstanding requests for one file (default 64) + --sftp-disable-concurrent-reads If set don't use concurrent reads + --sftp-disable-concurrent-writes If set don't use concurrent writes + --sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available + --sftp-host string SSH host to connect to + --sftp-idle-timeout Duration Max time before closing idle connections (default 1m0s) + --sftp-key-exchange SpaceSepList Space separated list of key exchange algorithms, ordered by preference + --sftp-key-file string Path to PEM-encoded private key file + --sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file (obscured) + --sftp-key-pem string Raw PEM-encoded private key + --sftp-key-use-agent When set forces the usage of the ssh-agent + --sftp-known-hosts-file string Optional path to known_hosts file + --sftp-macs SpaceSepList Space separated list of MACs (message authentication code) algorithms, ordered by preference + --sftp-md5sum-command string The command used to read md5 hashes + --sftp-pass string SSH password, leave blank to use ssh-agent (obscured) + --sftp-path-override string Override path used by SSH shell commands + --sftp-port int SSH port number (default 22) + --sftp-pubkey-file string Optional path to public key file + --sftp-server-command string Specifies the path or command to run a sftp server on the remote host + --sftp-set-env SpaceSepList Environment variables to pass to sftp and commands + --sftp-set-modtime Set the modified time on the remote if set (default true) + --sftp-sha1sum-command string The command used to read sha1 hashes + --sftp-shell-type 
string The type of SSH shell on remote server, if any + --sftp-skip-links Set to skip any symlinks and any other non regular files + --sftp-subsystem string Specifies the SSH2 subsystem on the remote host (default "sftp") + --sftp-use-fstat If set use fstat instead of stat + --sftp-use-insecure-cipher Enable the use of insecure ciphers and key exchange methods + --sftp-user string SSH username (default "$USER") + --sharefile-chunk-size SizeSuffix Upload chunk size (default 64Mi) + --sharefile-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,LeftPeriod,RightSpace,RightPeriod,InvalidUtf8,Dot) + --sharefile-endpoint string Endpoint for API calls + --sharefile-root-folder-id string ID of the root folder + --sharefile-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (default 128Mi) + --sia-api-password string Sia Daemon API Password (obscured) + --sia-api-url string Sia daemon API URL, like http://sia.daemon.host:9980 (default "http://127.0.0.1:9980") + --sia-encoding MultiEncoder The encoding for the backend (default Slash,Question,Hash,Percent,Del,Ctl,InvalidUtf8,Dot) + --sia-user-agent string Siad User Agent (default "Sia-Agent") + --skip-links Don't warn about skipped symlinks + --smb-case-insensitive Whether the server is configured to be case-insensitive (default true) + --smb-domain string Domain name for NTLM authentication (default "WORKGROUP") + --smb-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot) + --smb-hide-special-share Hide special shares (e.g. 
print$) which users aren't supposed to access (default true) + --smb-host string SMB server hostname to connect to + --smb-idle-timeout Duration Max time before closing idle connections (default 1m0s) + --smb-pass string SMB password (obscured) + --smb-port int SMB port number (default 445) + --smb-user string SMB username (default "$USER") + --storj-access-grant string Access grant + --storj-api-key string API key + --storj-passphrase string Encryption passphrase + --storj-provider string Choose an authentication method (default "existing") + --storj-satellite-address string Satellite address (default "us-central-1.storj.io") + --sugarsync-access-key-id string Sugarsync Access Key ID + --sugarsync-app-id string Sugarsync App ID + --sugarsync-authorization string Sugarsync authorization + --sugarsync-authorization-expiry string Sugarsync authorization expiry + --sugarsync-deleted-id string Sugarsync deleted folder id + --sugarsync-encoding MultiEncoder The encoding for the backend (default Slash,Ctl,InvalidUtf8,Dot) + --sugarsync-hard-delete Permanently delete files if true + --sugarsync-private-access-key string Sugarsync Private Access Key + --sugarsync-refresh-token string Sugarsync refresh token + --sugarsync-root-id string Sugarsync root id + --sugarsync-user string Sugarsync user + --swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID) + --swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME) + --swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET) + --swift-auth string Authentication URL for server (OS_AUTH_URL) + --swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN) + --swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION) + --swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments 
container (default 5Gi) + --swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME) + --swift-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8) + --swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public") + --swift-env-auth Get swift credentials from environment variables in standard OpenStack form + --swift-key string API key or password (OS_PASSWORD) + --swift-leave-parts-on-error If true avoid calling abort upload on a failure + --swift-no-chunk Don't chunk files during streaming upload + --swift-no-large-objects Disable support for static and dynamic large objects + --swift-region string Region name - optional (OS_REGION_NAME) + --swift-storage-policy string The storage policy to use when creating a new container + --swift-storage-url string Storage URL - optional (OS_STORAGE_URL) + --swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME) + --swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME) + --swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID) + --swift-user string User name to log in (OS_USERNAME) + --swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID) + --union-action-policy string Policy to choose upstream on ACTION category (default "epall") + --union-cache-time int Cache time of usage and free space (in seconds) (default 120) + --union-create-policy string Policy to choose upstream on CREATE category (default "epmfs") + --union-min-free-space SizeSuffix Minimum viable free space for lfs/eplfs policies (default 1Gi) + --union-search-policy string Policy to choose upstream on SEARCH category (default "ff") + --union-upstreams string List of space separated upstreams + --uptobox-access-token string Your access token + 
--uptobox-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot) + --webdav-bearer-token string Bearer token instead of user/pass (e.g. a Macaroon) + --webdav-bearer-token-command string Command to run to get a bearer token + --webdav-encoding string The encoding for the backend + --webdav-headers CommaSepList Set HTTP headers for all transactions + --webdav-pass string Password (obscured) + --webdav-url string URL of http host to connect to + --webdav-user string User name + --webdav-vendor string Name of the WebDAV site/service/software you are using + --yandex-auth-url string Auth server URL + --yandex-client-id string OAuth Client Id + --yandex-client-secret string OAuth Client Secret + --yandex-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot) + --yandex-hard-delete Delete files permanently rather than putting them into the trash + --yandex-token string OAuth Access Token as a JSON blob + --yandex-token-url string Token server url + --zoho-auth-url string Auth server URL + --zoho-client-id string OAuth Client Id + --zoho-client-secret string OAuth Client Secret + --zoho-encoding MultiEncoder The encoding for the backend (default Del,Ctl,InvalidUtf8) + --zoho-region string Zoho region to connect to + --zoho-token string OAuth Access Token as a JSON blob + --zoho-token-url string Token server url Docker Volume Plugin @@ -16542,9 +16802,10 @@ This will guide you through an interactive setup process: token_url> Optional token URL Remote config Make sure your Redirect URL is set to "http://127.0.0.1:53682/" in your custom config. - Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine + Use web browser to automatically authenticate rclone with remote? 
+ * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access + If not sure try Y. If Y failed, try N. y) Yes n) No y/n> y @@ -16815,6 +17076,7 @@ The S3 backend can be used with a number of different providers: - IBM COS S3 - IDrive e2 - IONOS Cloud +- Liara Object Storage - Minio - Qiniu Cloud Object Storage (Kodo) - RackCorp Object Storage @@ -16870,7 +17132,7 @@ This will guide you through an interactive setup process. Type of storage to configure. Choose a number from below, or type in your own value [snip] - XX / Amazon S3 Compliant Storage Providers including AWS, Ceph, ChinaMobile, ArvanCloud, Dreamhost, IBM COS, Minio, and Tencent COS + XX / Amazon S3 Compliant Storage Providers including AWS, Ceph, ChinaMobile, ArvanCloud, Dreamhost, IBM COS, Liara, Minio, and Tencent COS \ "s3" [snip] Storage> s3 @@ -16880,7 +17142,7 @@ This will guide you through an interactive setup process. \ "AWS" 2 / Ceph Object Storage \ "Ceph" - 3 / Digital Ocean Spaces + 3 / DigitalOcean Spaces \ "DigitalOcean" 4 / Dreamhost DreamObjects \ "Dreamhost" @@ -17430,9 +17692,9 @@ Standard options Here are the Standard options specific to s3 (Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, -Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, -IDrive e2, IONOS Cloud, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, -SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi). +Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, +IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, RackCorp, +Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi). 
--s3-provider @@ -17458,7 +17720,7 @@ Properties: - "ArvanCloud" - Arvan Cloud Object Storage (AOS) - "DigitalOcean" - - Digital Ocean Spaces + - DigitalOcean Spaces - "Dreamhost" - Dreamhost DreamObjects - "HuaweiOBS" @@ -17471,6 +17733,8 @@ Properties: - IONOS Cloud - "LyveCloud" - Seagate Lyve Cloud + - "Liara" + - Liara Object Storage - "Minio" - Minio Object Storage - "Netease" @@ -17824,7 +18088,7 @@ Properties: - Config: region - Env Var: RCLONE_S3_REGION - Provider: - !AWS,Alibaba,ChinaMobile,Cloudflare,IONOS,ArvanCloud,Qiniu,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive + !AWS,Alibaba,ChinaMobile,Cloudflare,IONOS,ArvanCloud,Liara,Qiniu,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive - Type: string - Required: false - Examples: @@ -18103,6 +18367,22 @@ Properties: --s3-endpoint +Endpoint for Liara Object Storage API. + +Properties: + +- Config: endpoint +- Env Var: RCLONE_S3_ENDPOINT +- Provider: Liara +- Type: string +- Required: false +- Examples: + - "storage.iran.liara.space" + - The default endpoint + - Iran + +--s3-endpoint + Endpoint for OSS API. 
Properties: @@ -18404,18 +18684,24 @@ Properties: - Config: endpoint - Env Var: RCLONE_S3_ENDPOINT - Provider: - !AWS,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,ArvanCloud,Scaleway,StackPath,Storj,RackCorp,Qiniu + !AWS,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,Liara,ArvanCloud,Scaleway,StackPath,Storj,RackCorp,Qiniu - Type: string - Required: false - Examples: - "objects-us-east-1.dream.io" - Dream Objects endpoint + - "syd1.digitaloceanspaces.com" + - DigitalOcean Spaces Sydney 1 + - "sfo3.digitaloceanspaces.com" + - DigitalOcean Spaces San Francisco 3 + - "fra1.digitaloceanspaces.com" + - DigitalOcean Spaces Frankfurt 1 - "nyc3.digitaloceanspaces.com" - - Digital Ocean Spaces New York 3 + - DigitalOcean Spaces New York 3 - "ams3.digitaloceanspaces.com" - - Digital Ocean Spaces Amsterdam 3 + - DigitalOcean Spaces Amsterdam 3 - "sgp1.digitaloceanspaces.com" - - Digital Ocean Spaces Singapore 1 + - DigitalOcean Spaces Singapore 1 - "localhost:8333" - SeaweedFS S3 localhost - "s3.us-east-1.lyvecloud.seagate.com" @@ -18425,15 +18711,33 @@ Properties: - "s3.ap-southeast-1.lyvecloud.seagate.com" - Seagate Lyve Cloud AP Southeast 1 (Singapore) - "s3.wasabisys.com" - - Wasabi US East endpoint + - Wasabi US East 1 (N. Virginia) + - "s3.us-east-2.wasabisys.com" + - Wasabi US East 2 (N. 
Virginia) + - "s3.us-central-1.wasabisys.com" + - Wasabi US Central 1 (Texas) - "s3.us-west-1.wasabisys.com" - - Wasabi US West endpoint + - Wasabi US West 1 (Oregon) + - "s3.ca-central-1.wasabisys.com" + - Wasabi CA Central 1 (Toronto) - "s3.eu-central-1.wasabisys.com" - - Wasabi EU Central endpoint + - Wasabi EU Central 1 (Amsterdam) + - "s3.eu-central-2.wasabisys.com" + - Wasabi EU Central 2 (Frankfurt) + - "s3.eu-west-1.wasabisys.com" + - Wasabi EU West 1 (London) + - "s3.eu-west-2.wasabisys.com" + - Wasabi EU West 2 (Paris) - "s3.ap-northeast-1.wasabisys.com" - Wasabi AP Northeast 1 (Tokyo) endpoint - "s3.ap-northeast-2.wasabisys.com" - Wasabi AP Northeast 2 (Osaka) endpoint + - "s3.ap-southeast-1.wasabisys.com" + - Wasabi AP Southeast 1 (Singapore) + - "s3.ap-southeast-2.wasabisys.com" + - Wasabi AP Southeast 2 (Sydney) + - "storage.iran.liara.space" + - Liara Iran endpoint - "s3.ir-thr-at1.arvanstorage.com" - ArvanCloud Tehran Iran (Asiatech) endpoint @@ -18767,7 +19071,7 @@ Properties: - Config: location_constraint - Env Var: RCLONE_S3_LOCATION_CONSTRAINT - Provider: - !AWS,Alibaba,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,ArvanCloud,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS + !AWS,Alibaba,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Liara,ArvanCloud,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS - Type: string - Required: false @@ -18784,6 +19088,9 @@ https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl Note that this ACL is applied when server-side copying objects as S3 doesn't copy the ACL from the source but rather writes a fresh one. +If the acl is an empty string then no X-Amz-Acl: header is added and the +default (private) will be used. 
+ Properties: - Config: acl @@ -18952,6 +19259,21 @@ Properties: --s3-storage-class +The storage class to use when storing new objects in Liara + +Properties: + +- Config: storage_class +- Env Var: RCLONE_S3_STORAGE_CLASS +- Provider: Liara +- Type: string +- Required: false +- Examples: + - "STANDARD" + - Standard storage class + +--s3-storage-class + The storage class to use when storing new objects in ArvanCloud. Properties: @@ -19033,9 +19355,9 @@ Advanced options Here are the Advanced options specific to s3 (Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, -Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, -IDrive e2, IONOS Cloud, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, -SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi). +Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, +IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, RackCorp, +Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi). --s3-bucket-acl @@ -19047,6 +19369,9 @@ https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl Note that this ACL is applied when only when creating buckets. If it isn't set then "acl" is used instead. +If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: +header is added and the default (private) will be used. + Properties: - Config: bucket_acl @@ -19665,6 +19990,36 @@ Properties: - Type: bool - Default: false +--s3-might-gzip + +Set this if the backend might gzip objects. + +Normally providers will not alter objects when they are downloaded. If +an object was not uploaded with Content-Encoding: gzip then it won't be +set on download. + +However some providers may gzip objects even if they weren't uploaded +with Content-Encoding: gzip (eg Cloudflare). 
+ +A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + +If you set this flag and rclone downloads an object with +Content-Encoding: gzip set and chunked transfer encoding, then rclone +will decompress the object on the fly. + +If this is set to unset (the default) then rclone will choose according +to the provider setting what to apply, but you can override rclone's +choice here. + +Properties: + +- Config: might_gzip +- Env Var: RCLONE_S3_MIGHT_GZIP +- Type: Tristate +- Default: unset + --s3-no-system-metadata Suppress setting and reading of system metadata @@ -19995,7 +20350,7 @@ of a bucket publicly. Type of storage to configure. Choose a number from below, or type in your own value. ... - XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi + XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi \ (s3) ... Storage> s3 @@ -20164,7 +20519,7 @@ Or you can also configure via the interactive command line: Type of storage to configure. Choose a number from below, or type in your own value. 
[snip] - 5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi + 5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi \ (s3) [snip] Storage> 5 @@ -20294,7 +20649,7 @@ To configure access to IBM COS S3, follow the steps below: \ "alias" 2 / Amazon Drive \ "amazon cloud drive" - 3 / Amazon S3 Complaint Storage Providers (Dreamhost, Ceph, ChinaMobile, ArvanCloud, Minio, IBM COS) + 3 / Amazon S3 Complaint Storage Providers (Dreamhost, Ceph, ChinaMobile, Liara, ArvanCloud, Minio, IBM COS) \ "s3" 4 / Backblaze B2 \ "b2" @@ -20455,7 +20810,7 @@ This will guide you through an interactive setup process. Type of storage to configure. Choose a number from below, or type in your own value. [snip] - XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi + XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi \ (s3) [snip] Storage> s3 @@ -20561,7 +20916,7 @@ Type s3 to choose the connection type: Type of storage to configure. Choose a number from below, or type in your own value. 
[snip] - XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi + XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi \ (s3) [snip] Storage> s3 @@ -20795,7 +21150,7 @@ To configure access to Qiniu Kodo, follow the steps below: \ (alias) 4 / Amazon Drive \ (amazon cloud drive) - 5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi + 5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi \ (s3) [snip] Storage> s3 @@ -21032,7 +21387,7 @@ Choose s3 backend Type of storage to configure. Choose a number from below, or type in your own value. [snip] - XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, Lyve Cloud, Minio, RackCorp, SeaweedFS, and Tencent COS + XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Liara, Lyve Cloud, Minio, RackCorp, SeaweedFS, and Tencent COS \ (s3) [snip] Storage> s3 @@ -21196,7 +21551,7 @@ rclone like this. Type of storage to configure. 
Choose a number from below, or type in your own value [snip] - XX / Amazon S3 (also Dreamhost, Ceph, ChinaMobile, ArvanCloud, Minio) + XX / Amazon S3 (also Dreamhost, Ceph, ChinaMobile, ArvanCloud, Minio, Liara) \ "s3" [snip] Storage> s3 @@ -21306,7 +21661,7 @@ This will guide you through an interactive setup process. Enter a string value. Press Enter for the default (""). Choose a number from below, or type in your own value [snip] - 4 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, Minio, and Tencent COS + 4 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Liara, Minio, and Tencent COS \ "s3" [snip] Storage> s3 @@ -21414,7 +21769,7 @@ This will guide you through an interactive setup process. Type of storage to configure. Choose a number from below, or type in your own value. ... - 5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, Lyve Cloud, Minio, RackCorp, SeaweedFS, and Tencent COS + 5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Lyve Cloud, Minio, RackCorp, SeaweedFS, and Tencent COS \ (s3) ... Storage> s3 @@ -21643,6 +21998,103 @@ This will guide you through an interactive setup process. d) Delete this remote y/e/d> y +Liara + +Here is an example of making a Liara Object Storage configuration. First +run: + + rclone config + +This will guide you through an interactive setup process. + + No remotes found, make a new one? + n) New remote + s) Set configuration password + n/s> n + name> Liara + Type of storage to configure. 
+ Choose a number from below, or type in your own value + [snip] + XX / Amazon S3 (also Dreamhost, Ceph, ChinaMobile, ArvanCloud, Liara, Minio) + \ "s3" + [snip] + Storage> s3 + Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). Only applies if access_key_id and secret_access_key is blank. + Choose a number from below, or type in your own value + 1 / Enter AWS credentials in the next step + \ "false" + 2 / Get AWS credentials from the environment (env vars or IAM) + \ "true" + env_auth> 1 + AWS Access Key ID - leave blank for anonymous access or runtime credentials. + access_key_id> YOURACCESSKEY + AWS Secret Access Key (password) - leave blank for anonymous access or runtime credentials. + secret_access_key> YOURSECRETACCESSKEY + Region to connect to. + Choose a number from below, or type in your own value + / The default endpoint + 1 | US Region, Northern Virginia, or Pacific Northwest. + | Leave location constraint empty. + \ "us-east-1" + [snip] + region> + Endpoint for S3 API. + Leave blank if using Liara to use the default endpoint for the region. + Specify if using an S3 clone such as Ceph. + endpoint> storage.iran.liara.space + Canned ACL used when creating buckets and/or storing objects in S3. + For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + Choose a number from below, or type in your own value + 1 / Owner gets FULL_CONTROL. No one else has access rights (default). + \ "private" + [snip] + acl> + The server-side encryption algorithm used when storing this object in S3. + Choose a number from below, or type in your own value + 1 / None + \ "" + 2 / AES256 + \ "AES256" + server_side_encryption> + The storage class to use when storing objects in S3. 
+ Choose a number from below, or type in your own value + 1 / Default + \ "" + 2 / Standard storage class + \ "STANDARD" + storage_class> + Remote config + -------------------- + [Liara] + env_auth = false + access_key_id = YOURACCESSKEY + secret_access_key = YOURSECRETACCESSKEY + endpoint = storage.iran.liara.space + location_constraint = + acl = + server_side_encryption = + storage_class = + -------------------- + y) Yes this is OK + e) Edit this remote + d) Delete this remote + y/e/d> y + +This will leave the config file looking like this. + + [Liara] + type = s3 + provider = Liara + env_auth = false + access_key_id = YOURACCESSKEY + secret_access_key = YOURSECRETACCESSKEY + region = + endpoint = storage.iran.liara.space + location_constraint = + acl = + server_side_encryption = + storage_class = + ArvanCloud ArvanCloud ArvanCloud Object Storage goes beyond the limited traditional @@ -21662,7 +22114,7 @@ rclone like this. Type of storage to configure. Choose a number from below, or type in your own value [snip] - XX / Amazon S3 (also Dreamhost, Ceph, ChinaMobile, ArvanCloud, Minio) + XX / Amazon S3 (also Dreamhost, Ceph, ChinaMobile, ArvanCloud, Liara, Minio) \ "s3" [snip] Storage> s3 @@ -21779,7 +22231,7 @@ To configure access to Tencent COS, follow the steps below: \ "alias" 3 / Amazon Drive \ "amazon cloud drive" - 4 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, Minio, and Tencent COS + 4 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Liara, Minio, and Tencent COS \ "s3" [snip] Storage> s3 @@ -22584,9 +23036,10 @@ This will guide you through an interactive setup process: \ "enterprise" box_sub_type> Remote config - Use auto config? 
- * Say Y if not sure - * Say N if you are working on a remote or headless machine + Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access + If not sure try Y. If Y failed, try N. y) Yes n) No y/n> y @@ -22701,9 +23154,10 @@ Here is how to do it. y) Yes n) No y/n> y - Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine + Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access + If not sure try Y. If Y failed, try N. y) Yes n) No y/n> y @@ -22977,13 +23431,15 @@ Reverse Solidus). Box only supports filenames up to 255 characters in length. +Box has API rate limits that sometimes reduce the speed of rclone. + rclone about is not supported by the Box backend. Backends without this capability cannot determine free space for an rclone mount or use policy mfs (most free space) as a member of an rclone union remote. See List of backends that do not support rclone about and rclone about -Cache (DEPRECATED) +Cache The cache remote wraps another existing remote and stores file structure and its data for long running tasks like rclone mount. @@ -23678,7 +24134,7 @@ Print stats on the cache backend in JSON format. rclone backend stats remote: [options] [+] -Chunker (BETA) +Chunker The chunker overlay transparently splits large files into smaller chunks during upload to wrapped remote and transparently assembles them back @@ -24200,9 +24656,10 @@ This will guide you through an interactive setup process: n) No y/n> n Remote config - Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine + Use web browser to automatically authenticate rclone with remote? 
+ * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access + If not sure try Y. If Y failed, try N. y) Yes n) No y/n> y @@ -25137,7 +25594,7 @@ SEE ALSO - rclone cryptdecode - Show forward/reverse mapping of encrypted filenames -Compress (Experimental) +Compress Warning @@ -25494,6 +25951,15 @@ This will guide you through an interactive setup process: d) Delete this remote y/e/d> y +See the remote setup docs for how to set it up on a machine with no +Internet browser available. + +Note that rclone runs a webserver on your local machine to collect the +token as returned from Dropbox. This only runs from the moment it opens +your browser to the moment you get back the verification code. This is +on http://127.0.0.1:53682/ and it may require you to unblock it +temporarily if you are running a host firewall, or use manual mode. + You can then use it like this, List directories in top level of your dropbox @@ -26391,7 +26857,7 @@ Use Implicit FTPS (FTP over TLS). When using implicit FTP over TLS the client connects using TLS right from the start which breaks compatibility with non-TLS-aware servers. This is usually served over port 990 rather than port 21. Cannot be used -in combination with explicit FTP. +in combination with explicit FTPS. Properties: @@ -26406,7 +26872,7 @@ Use Explicit FTPS (FTP over TLS). When using explicit FTP over TLS the client explicitly requests security from the server in order to upgrade a plain text connection to an -encrypted one. Cannot be used in combination with implicit FTP. +encrypted one. Cannot be used in combination with implicit FTPS. Properties: @@ -26772,9 +27238,10 @@ This will guide you through an interactive setup process: \ "DURABLE_REDUCED_AVAILABILITY" storage_class> 5 Remote config - Use auto config? 
- * Say Y if not sure - * Say N if you are working on a remote or headless machine or Y didn't work + Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access + If not sure try Y. If Y failed, try N. y) Yes n) No y/n> y @@ -26797,12 +27264,15 @@ This will guide you through an interactive setup process: d) Delete this remote y/e/d> y +See the remote setup docs for how to set it up on a machine with no +Internet browser available. + Note that rclone runs a webserver on your local machine to collect the -token as returned from Google if you use auto config mode. This only -runs from the moment it opens your browser to the moment you get back -the verification code. This is on http://127.0.0.1:53682/ and this it -may require you to unblock it temporarily if you are running a host -firewall, or use manual mode. +token as returned from Google if using web browser to automatically +authenticate. This only runs from the moment it opens your browser to +the moment you get back the verification code. This is on +http://127.0.0.1:53682/ and this it may require you to unblock it +temporarily if you are running a host firewall, or use manual mode. This remote is called remote and can now be used like this @@ -27366,9 +27836,10 @@ This will guide you through an interactive setup process: Service Account Credentials JSON file path - needed only if you want use SA instead of interactive login. service_account_file> Remote config - Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine or Y didn't work + Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access + If not sure try Y. If Y failed, try N. 
y) Yes n) No y/n> y @@ -27394,12 +27865,15 @@ This will guide you through an interactive setup process: d) Delete this remote y/e/d> y +See the remote setup docs for how to set it up on a machine with no +Internet browser available. + Note that rclone runs a webserver on your local machine to collect the -token as returned from Google if you use auto config mode. This only -runs from the moment it opens your browser to the moment you get back -the verification code. This is on http://127.0.0.1:53682/ and it may -require you to unblock it temporarily if you are running a host -firewall, or use manual mode. +token as returned from Google if using web browser to automatically +authenticate. This only runs from the moment it opens your browser to +the moment you get back the verification code. This is on +http://127.0.0.1:53682/ and it may require you to unblock it temporarily +if you are running a host firewall, or use manual mode. You can then use it like this, @@ -28939,9 +29413,10 @@ This will guide you through an interactive setup process: n) No y/n> n Remote config - Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine + Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access + If not sure try Y. If Y failed, try N. y) Yes n) No y/n> y @@ -28964,12 +29439,15 @@ This will guide you through an interactive setup process: d) Delete this remote y/e/d> y +See the remote setup docs for how to set it up on a machine with no +Internet browser available. + Note that rclone runs a webserver on your local machine to collect the -token as returned from Google if you use auto config mode. This only -runs from the moment it opens your browser to the moment you get back -the verification code. 
This is on http://127.0.0.1:53682/ and this may -require you to unblock it temporarily if you are running a host -firewall, or use manual mode. +token as returned from Google if using web browser to automatically +authenticate. This only runs from the moment it opens your browser to +the moment you get back the verification code. This is on +http://127.0.0.1:53682/ and this may require you to unblock it +temporarily if you are running a host firewall, or use manual mode. This remote is called remote and can now be used like this @@ -29341,7 +29819,7 @@ Deleting albums The Google Photos API does not support deleting albums - see bug #135714733. -Hasher (EXPERIMENTAL) +Hasher Hasher is a special overlay backend to create remotes which handle checksums for other remotes. It's main functions include: - Emulate hash @@ -29934,7 +30412,10 @@ This will guide you through an interactive setup process: scope_access> Edit advanced config? y/n> n - Use auto config? + Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access + If not sure try Y. If Y failed, try N. y/n> y If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth?state=xxxxxxxxxxxxxxxxxxxxxx Log in and authorize rclone for access @@ -30632,6 +31113,22 @@ can be triggered when you did a server-side copy. Reading metadata will also provide custom (non-standard nor reserved) ones. +Filtering auto generated files + +The Internet Archive automatically creates metadata files after upload. +These can cause problems when doing an rclone sync as rclone will try, +and fail, to delete them. These metadata files are not changeable, as +they are created by the Internet Archive automatically. + +These auto-created files can be excluded from the sync using metadata +filtering. + + rclone sync ... 
--metadata-exclude "source=metadata" --metadata-exclude "format=Metadata" + +Which excludes from the sync any files which have the source=metadata or +format=Metadata flags which are added to Internet Archive auto-created +files. + Configuration Here is an example of making an internetarchive configuration. Most @@ -31679,8 +32176,22 @@ Features highlights Configuration -Here is an example of making a mailru configuration. First create a -Mail.ru Cloud account and choose a tariff, then run +Here is an example of making a mailru configuration. + +First create a Mail.ru Cloud account and choose a tariff. + +You will need to log in and create an app password for rclone. Rclone +will not work with your normal username and password - it will give an +error like oauth2: server response missing access_token. + +- Click on your user icon in the top right +- Go to Security / "Пароль и безопасность" +- Click password for apps / "Пароли для внешних приложений" +- Add the password - give it a name - eg "rclone" +- Copy the password and use this password below - your normal login + password won't work. + +Now run rclone config @@ -31705,6 +32216,10 @@ This will guide you through an interactive setup process: Enter a string value. Press Enter for the default (""). user> username@mail.ru Password + + This must be an app password - rclone will not work with your normal + password. See the Configuration section in the docs for how to make an + app password. y) Yes type in my own password g) Generate random password y/g> y @@ -31826,6 +32341,10 @@ Properties: Password. +This must be an app password - rclone will not work with your normal +password. See the Configuration section in the docs for how to make an +app password. + NB Input to this must be obscured - see rclone obscure. Properties: @@ -32710,7 +33229,12 @@ Modified time The modified time is stored as metadata on the object with the mtime key. It is stored using RFC3339 Format time with nanosecond precision. 
The metadata is supplied during directory listings so there is no -overhead to using it. +performance overhead to using it. + +If you wish to use the Azure standard LastModified time stored on the +object as the modified time, then use the --use-server-modtime flag. +Note that rclone can't set LastModified, so using the --update flag when +syncing is recommended if using --use-server-modtime. Performance @@ -32746,11 +33270,88 @@ MD5 hashes are stored with blobs. However blobs that were uploaded in chunks only have an MD5 if the source remote was capable of MD5 hashes, e.g. the local disk. -Authenticating with Azure Blob Storage +Authentication -Rclone has 3 ways of authenticating with Azure Blob Storage: +There are a number of ways of supplying credentials for Azure Blob +Storage. Rclone tries them in the order of the sections below. -Account and Key +Env Auth + +If the env_auth config parameter is true then rclone will pull +credentials from the environment or runtime. + +It tries these authentication methods in this order: + +1. Environment Variables +2. Managed Service Identity Credentials +3. Azure CLI credentials (as used by the az tool) + +These are described in the following sections + +Env Auth: 1. Environment Variables + +If env_auth is set and environment variables are present rclone +authenticates a service principal with a secret or certificate, or a +user with a password, depending on which environment variable are set. +It reads configuration from these variables, in the following order: + +1. Service principal with client secret + - AZURE_TENANT_ID: ID of the service principal's tenant. Also + called its "directory" ID. + - AZURE_CLIENT_ID: the service principal's client ID + - AZURE_CLIENT_SECRET: one of the service principal's client + secrets +2. Service principal with certificate + - AZURE_TENANT_ID: ID of the service principal's tenant. Also + called its "directory" ID. 
+ - AZURE_CLIENT_ID: the service principal's client ID + - AZURE_CLIENT_CERTIFICATE_PATH: path to a PEM or PKCS12 + certificate file including the private key. + - AZURE_CLIENT_CERTIFICATE_PASSWORD: (optional) password for the + certificate file. + - AZURE_CLIENT_SEND_CERTIFICATE_CHAIN: (optional) Specifies + whether an authentication request will include an x5c header to + support subject name / issuer based authentication. When set to + "true" or "1", authentication requests include the x5c header. +3. User with username and password + - AZURE_TENANT_ID: (optional) tenant to authenticate in. Defaults + to "organizations". + - AZURE_CLIENT_ID: client ID of the application the user will + authenticate to + - AZURE_USERNAME: a username (usually an email address) + - AZURE_PASSWORD: the user's password + +Env Auth: 2. Managed Service Identity Credentials + +When using Managed Service Identity if the VM(SS) on which this program +is running has a system-assigned identity, it will be used by default. +If the resource has no system-assigned but exactly one user-assigned +identity, the user-assigned identity will be used by default. + +If the resource has multiple user-assigned identities you will need to +unset env_auth and set use_msi instead. See the use_msi section. + +Env Auth: 3. Azure CLI credentials (as used by the az tool) + +Credentials created with the az tool can be picked up using env_auth. + +For example if you were to login with a service principal like this: + + az login --service-principal -u XXX -p XXX --tenant XXX + +Then you could access rclone resources like this: + + rclone lsf :azureblob,env_auth,account=ACCOUNT:CONTAINER + +Or + + rclone lsf --azureblob-env-auth --azureblob-account=ACCOUNT :azureblob:CONTAINER + +Which is analogous to using the az tool: + + az storage blob list --container-name CONTAINER --account-name ACCOUNT --auth-mode login + +Account and Shared Key This is the most straight forward and least flexible way.
Just fill in the account and key lines and leave the rest blank. @@ -32759,7 +33360,7 @@ SAS URL This can be an account level SAS URL or container level SAS URL. -To use it leave account, key blank and fill in sas_url. +To use it leave account and key blank and fill in sas_url. An account level SAS URL or container level SAS URL can be obtained from the Azure portal or the Azure Storage Explorer. To get a container level @@ -32785,6 +33386,72 @@ Container level SAS URLs are useful for temporarily allowing third parties access to a single container or putting credentials into an untrusted environment such as a CI build server. +Service principal with client secret + +If these variables are set, rclone will authenticate with a service +principal with a client secret. + +- tenant: ID of the service principal's tenant. Also called its + "directory" ID. +- client_id: the service principal's client ID +- client_secret: one of the service principal's client secrets + +The credentials can also be placed in a file using the +service_principal_file configuration option. + +Service principal with certificate + +If these variables are set, rclone will authenticate with a service +principal with certificate. + +- tenant: ID of the service principal's tenant. Also called its + "directory" ID. +- client_id: the service principal's client ID +- client_certificate_path: path to a PEM or PKCS12 certificate file + including the private key. +- client_certificate_password: (optional) password for the certificate + file. +- client_send_certificate_chain: (optional) Specifies whether an + authentication request will include an x5c header to support subject + name / issuer based authentication. When set to "true" or "1", + authentication requests include the x5c header. + +NB client_certificate_password must be obscured - see rclone obscure. + +User with username and password + +If these variables are set, rclone will authenticate with username and +password. 
+ +- tenant: (optional) tenant to authenticate in. Defaults to + "organizations". +- client_id: client ID of the application the user will authenticate + to +- username: a username (usually an email address) +- password: the user's password + +Microsoft doesn't recommend this kind of authentication, because it's +less secure than other authentication flows. This method is not +interactive, so it isn't compatible with any form of multi-factor +authentication, and the application must already have user or admin +consent. This credential can only authenticate work and school accounts; +it can't authenticate Microsoft accounts. + +NB password must be obscured - see rclone obscure. + +Managed Service Identity Credentials + +If use_msi is set then managed service identity credentials are used. +This authentication only works when running in an Azure service. +env_auth needs to be unset to use this. + +However if you have multiple user identities to choose from these must +be explicitly specified using exactly one of the msi_object_id, +msi_client_id, or msi_mi_res_id parameters. + +If none of msi_object_id, msi_client_id, or msi_mi_res_id is set, this +is is equivalent to using env_auth. + Standard options Here are the Standard options specific to azureblob (Microsoft Azure @@ -32792,9 +33459,14 @@ Blob Storage). --azureblob-account -Storage Account Name. +Azure Storage Account Name. -Leave blank to use SAS URL or Emulator. +Set this to the Azure Storage Account Name in use. + +Leave blank to use SAS URL or Emulator, otherwise it needs to be set. + +If this is blank and if env_auth is set it will be read from the +environment variable AZURE_STORAGE_ACCOUNT_NAME if possible. Properties: @@ -32803,31 +33475,22 @@ Properties: - Type: string - Required: false ---azureblob-service-principal-file +--azureblob-env-auth -Path to file containing credentials for use with a service principal. +Read credentials from runtime (environment variables, CLI or MSI). -Leave blank normally. 
Needed only if you want to use a service principal -instead of interactive login. - - $ az ad sp create-for-rbac --name "" \ - --role "Storage Blob Data Owner" \ - --scopes "/subscriptions//resourceGroups//providers/Microsoft.Storage/storageAccounts//blobServices/default/containers/" \ - > azure-principal.json - -See "Create an Azure service principal" and "Assign an Azure role for -access to blob data" pages for more details. +See the authentication docs for full info. Properties: -- Config: service_principal_file -- Env Var: RCLONE_AZUREBLOB_SERVICE_PRINCIPAL_FILE -- Type: string -- Required: false +- Config: env_auth +- Env Var: RCLONE_AZUREBLOB_ENV_AUTH +- Type: bool +- Default: false --azureblob-key -Storage Account Key. +Storage Account Shared Key. Leave blank to use SAS URL or Emulator. @@ -32851,6 +33514,153 @@ Properties: - Type: string - Required: false +--azureblob-tenant + +ID of the service principal's tenant. Also called its directory ID. + +Set this if using - Service principal with client secret - Service +principal with certificate - User with username and password + +Properties: + +- Config: tenant +- Env Var: RCLONE_AZUREBLOB_TENANT +- Type: string +- Required: false + +--azureblob-client-id + +The ID of the client in use. + +Set this if using - Service principal with client secret - Service +principal with certificate - User with username and password + +Properties: + +- Config: client_id +- Env Var: RCLONE_AZUREBLOB_CLIENT_ID +- Type: string +- Required: false + +--azureblob-client-secret + +One of the service principal's client secrets + +Set this if using - Service principal with client secret + +Properties: + +- Config: client_secret +- Env Var: RCLONE_AZUREBLOB_CLIENT_SECRET +- Type: string +- Required: false + +--azureblob-client-certificate-path + +Path to a PEM or PKCS12 certificate file including the private key. 
+ +Set this if using - Service principal with certificate + +Properties: + +- Config: client_certificate_path +- Env Var: RCLONE_AZUREBLOB_CLIENT_CERTIFICATE_PATH +- Type: string +- Required: false + +--azureblob-client-certificate-password + +Password for the certificate file (optional). + +Optionally set this if using - Service principal with certificate + +And the certificate has a password. + +NB Input to this must be obscured - see rclone obscure. + +Properties: + +- Config: client_certificate_password +- Env Var: RCLONE_AZUREBLOB_CLIENT_CERTIFICATE_PASSWORD +- Type: string +- Required: false + +Advanced options + +Here are the Advanced options specific to azureblob (Microsoft Azure +Blob Storage). + +--azureblob-client-send-certificate-chain + +Send the certificate chain when using certificate auth. + +Specifies whether an authentication request will include an x5c header +to support subject name / issuer based authentication. When set to true, +authentication requests include the x5c header. + +Optionally set this if using - Service principal with certificate + +Properties: + +- Config: client_send_certificate_chain +- Env Var: RCLONE_AZUREBLOB_CLIENT_SEND_CERTIFICATE_CHAIN +- Type: bool +- Default: false + +--azureblob-username + +User name (usually an email address) + +Set this if using - User with username and password + +Properties: + +- Config: username +- Env Var: RCLONE_AZUREBLOB_USERNAME +- Type: string +- Required: false + +--azureblob-password + +The user's password + +Set this if using - User with username and password + +NB Input to this must be obscured - see rclone obscure. + +Properties: + +- Config: password +- Env Var: RCLONE_AZUREBLOB_PASSWORD +- Type: string +- Required: false + +--azureblob-service-principal-file + +Path to file containing credentials for use with a service principal. + +Leave blank normally. Needed only if you want to use a service principal +instead of interactive login. 
+ + $ az ad sp create-for-rbac --name "" \ + --role "Storage Blob Data Owner" \ + --scopes "/subscriptions//resourceGroups//providers/Microsoft.Storage/storageAccounts//blobServices/default/containers/" \ + > azure-principal.json + +See "Create an Azure service principal" and "Assign an Azure role for +access to blob data" pages for more details. + +It may be more convenient to put the credentials directly into the +rclone config file under the client_id, tenant and client_secret keys +instead of setting service_principal_file. + +Properties: + +- Config: service_principal_file +- Env Var: RCLONE_AZUREBLOB_SERVICE_PRINCIPAL_FILE +- Type: string +- Required: false + --azureblob-use-msi Use a managed service identity to authenticate (only works in Azure). @@ -32873,24 +33683,6 @@ Properties: - Type: bool - Default: false ---azureblob-use-emulator - -Uses local storage emulator if provided as 'true'. - -Leave blank if using real azure storage endpoint. - -Properties: - -- Config: use_emulator -- Env Var: RCLONE_AZUREBLOB_USE_EMULATOR -- Type: bool -- Default: false - -Advanced options - -Here are the Advanced options specific to azureblob (Microsoft Azure -Blob Storage). - --azureblob-msi-object-id Object ID of the user-assigned MSI to use, if any. @@ -32930,6 +33722,19 @@ Properties: - Type: string - Required: false +--azureblob-use-emulator + +Uses local storage emulator if provided as 'true'. + +Leave blank if using real azure storage endpoint. + +Properties: + +- Config: use_emulator +- Env Var: RCLONE_AZUREBLOB_USE_EMULATOR +- Type: bool +- Default: false + --azureblob-endpoint Endpoint for the service. @@ -33132,6 +33937,20 @@ Properties: - "container" - Allow full public read access for container and blob data. +--azureblob-no-check-container + +If set, don't attempt to check the container exists or create it. + +This can be useful when trying to minimise the number of transactions +rclone does if you know the container exists already. 
+ +Properties: + +- Config: no_check_container +- Env Var: RCLONE_AZUREBLOB_NO_CHECK_CONTAINER +- Type: bool +- Default: false + --azureblob-no-head-object If set, do not do HEAD before GET when getting objects. @@ -33143,6 +33962,18 @@ Properties: - Type: bool - Default: false +Custom upload headers + +You can set custom upload headers with the --header-upload flag. + +- Cache-Control +- Content-Disposition +- Content-Encoding +- Content-Language +- Content-Type + +Eg --header-upload "Content-Type: text/potato" + Limitations MD5 sums are only uploaded with chunked files if the source has an MD5 @@ -33157,16 +33988,19 @@ See List of backends that do not support rclone about and rclone about Azure Storage Emulator Support -You can run rclone with storage emulator (usually azurite). +You can run rclone with the storage emulator (usually azurite). -To do this, just set up a new remote with rclone config following -instructions described in introduction and set use_emulator config as -true. You do not need to provide default account name neither an account -key. +To do this, just set up a new remote with rclone config following the +instructions in the introduction and set use_emulator in the advanced +settings as true. You do not need to provide a default account name nor +an account key. But you can override them in the account and key +options. (Prior to v1.61 they were hard coded to azurite's +devstoreaccount1.) Also, if you want to access a storage emulator instance running on a -different machine, you can override Endpoint parameter in advanced -settings, setting it to http(s)://:/devstoreaccount1 (e.g. +different machine, you can override the endpoint parameter in the +advanced settings, setting it to +http(s)://:/devstoreaccount1 (e.g. http://10.254.2.5:10000/devstoreaccount1). Microsoft OneDrive @@ -33217,9 +34051,10 @@ This will guide you through an interactive setup process: n) No y/n> n Remote config - Use auto config? 
- * Say Y if not sure - * Say N if you are working on a remote or headless machine + Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access + If not sure try Y. If Y failed, try N. y) Yes n) No y/n> y @@ -33926,6 +34761,30 @@ didn't allow public links to be made for the organisation/sharepoint library. To fix the permissions as an admin, take a look at the docs: 1, 2. +Can not access Shared with me files + +Shared with me files is not supported by rclone currently, but there is +a workaround: + +1. Visit https://onedrive.live.com + +2. Right click a item in Shared, then click Add shortcut to My files in + the context + + Screenshot (Shared with me) + + [make_shortcut] + +3. The shortcut will appear in My files, you can access it with rclone, + it behaves like a normal folder/file. + + Screenshot (My Files) + + [in_my_files] + +Screenshot (rclone mount) + +[rclone_mount] OpenDrive Paths are specified as remote:path @@ -35782,9 +36641,10 @@ This will guide you through an interactive setup process: Pcloud App Client Secret - leave blank normally. client_secret> Remote config - Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine + Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access + If not sure try Y. If Y failed, try N. y) Yes n) No y/n> y @@ -36062,9 +36922,10 @@ This will guide you through an interactive setup process: ** See help for premiumizeme backend at: https://rclone.org/premiumizeme/ ** Remote config - Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine + Use web browser to automatically authenticate rclone with remote? 
+ * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access + If not sure try Y. If Y failed, try N. y) Yes n) No y/n> y @@ -36204,9 +37065,10 @@ This will guide you through an interactive setup process: ** See help for putio backend at: https://rclone.org/putio/ ** Remote config - Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine + Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access + If not sure try Y. If Y failed, try N. y) Yes n) No y/n> y @@ -36238,12 +37100,15 @@ This will guide you through an interactive setup process: q) Quit config e/n/d/r/c/s/q> q +See the remote setup docs for how to set it up on a machine with no +Internet browser available. + Note that rclone runs a webserver on your local machine to collect the -token as returned from Google if you use auto config mode. This only -runs from the moment it opens your browser to the moment you get back -the verification code. This is on http://127.0.0.1:53682/ and this it -may require you to unblock it temporarily if you are running a host -firewall, or use manual mode. +token as returned from put.io if using web browser to automatically +authenticate. This only runs from the moment it opens your browser to +the moment you get back the verification code. This is on +http://127.0.0.1:53682/ and this it may require you to unblock it +temporarily if you are running a host firewall, or use manual mode. You can then use it like this, @@ -37192,6 +38057,9 @@ methods: Those algorithms are insecure and may allow plaintext data to be recovered by an attacker. +This must be false if you use either ciphers or key_exchange advanced +options. 
+ Properties: - Config: use_insecure_cipher @@ -37519,6 +38387,66 @@ Properties: - Type: SpaceSepList - Default: +--sftp-ciphers + +Space separated list of ciphers to be used for session encryption, +ordered by preference. + +At least one must match with server configuration. This can be checked +for example using ssh -Q cipher. + +This must not be set if use_insecure_cipher is true. + +Example: + + aes128-ctr aes192-ctr aes256-ctr aes128-gcm@openssh.com aes256-gcm@openssh.com + +Properties: + +- Config: ciphers +- Env Var: RCLONE_SFTP_CIPHERS +- Type: SpaceSepList +- Default: + +--sftp-key-exchange + +Space separated list of key exchange algorithms, ordered by preference. + +At least one must match with server configuration. This can be checked +for example using ssh -Q kex. + +This must not be set if use_insecure_cipher is true. + +Example: + + sntrup761x25519-sha512@openssh.com curve25519-sha256 curve25519-sha256@libssh.org ecdh-sha2-nistp256 + +Properties: + +- Config: key_exchange +- Env Var: RCLONE_SFTP_KEY_EXCHANGE +- Type: SpaceSepList +- Default: + +--sftp-macs + +Space separated list of MACs (message authentication code) algorithms, +ordered by preference. + +At least one must match with server configuration. This can be checked +for example using ssh -Q mac. + +Example: + + umac-64-etm@openssh.com umac-128-etm@openssh.com hmac-sha2-256-etm@openssh.com + +Properties: + +- Config: macs +- Env Var: RCLONE_SFTP_MACS +- Type: SpaceSepList +- Default: + Limitations On some SFTP servers (e.g. Synology) the paths are different for SSH and @@ -39385,9 +40313,10 @@ This will guide you through an interactive setup process: Yandex Client Secret - leave blank normally. client_secret> Remote config - Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine + Use web browser to automatically authenticate rclone with remote? 
+ * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access + If not sure try Y. If Y failed, try N. y) Yes n) No y/n> y @@ -39617,9 +40546,10 @@ This will guide you through an interactive setup process: n) No (default) y/n> n Remote config - Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine + Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access + If not sure try Y. If Y failed, try N. y) Yes (default) n) No y/n> @@ -40428,6 +41358,187 @@ Options: Changelog +v1.61.0 - 2022-12-20 + +See commits + +- New backends + - New S3 providers + - Liara LOS (MohammadReza) +- New Features + - build: Add vulnerability testing using govulncheck (albertony) + - cmd: Enable SIGINFO (Ctrl-T) handler on FreeBSD, NetBSD, OpenBSD + and Dragonfly BSD (x3-apptech) + - config: Add config/setpath for setting config path via + rc/librclone (Nick Craig-Wood) + - dedupe + - Count Checks in the stats while scanning for duplicates + (Nick Craig-Wood) + - Make dedupe obey the filters (Nick Craig-Wood) + - dlna: Properly attribute code used from + https://github.com/anacrolix/dms (Nick Craig-Wood) + - docs + - Add minimum versions and status badges to backend and + command docs (Nick Craig-Wood, albertony) + - Remote names may not start or end with space (albertony) + - filter: Add metadata filters --metadata-include/exclude/filter + and friends (Nick Craig-Wood) + - fs + - Make all duration flags take y, M, w, d etc suffixes (Nick + Craig-Wood) + - Add global flag --color to control terminal colors (Kevin + Verstaen) + - fspath: Allow unicode numbers and letters in remote names + (albertony) + - lib/file: Improve error message for creating dir on non-existent + network host on windows (albertony) + - 
lib/http: Finish port of rclone servers to lib/http (Tom + Mombourquette, Nick Craig-Wood) + - lib/oauthutil: Improved usability of config flows needing web + browser (Ole Frost) + - ncdu + - Add support for modification time (albertony) + - Fallback to sort by name also for sort by average size + (albertony) + - Rework to use tcell directly instead of the termbox wrapper + (eNV25) + - rc: Add commands to set GC Percent & Memory Limit (go 1.19+) + (Anagh Kumar Baranwal) + - rcat: Preserve metadata when Copy falls back to Rcat (Nick + Craig-Wood) + - rcd: Refactor rclone rc server to use lib/http (Nick Craig-Wood) + - rcserver: Avoid generating default credentials with htpasswd + (Kamui) + - restic: Refactor to use lib/http (Nolan Woods) + - serve http: Support unix sockets and multiple listeners (Tom + Mombourquette) + - serve webdav: Refactor to use lib/http (Nick Craig-Wood) + - test: Replace defer cleanup with t.Cleanup (Eng Zer Jun) + - test memory: Read metadata if -M flag is specified (Nick + Craig-Wood) + - wasm: Comply with wasm_exec.js licence terms (Matthew Vernon) +- Bug Fixes + - build: Update golang.org/x/net/http2 to fix GO-2022-1144 (Nick + Craig-Wood) + - restic: Fix typo in docs 'remove' should be 'remote' + (asdffdsazqqq) + - serve dlna: Fix panic: Logger uninitialized. (Nick Craig-Wood) +- Mount + - Update cgofuse for FUSE-T support for mounting volumes on Mac + (Nick Craig-Wood) +- VFS + - Windows: fix slow opening of exe files by not truncating files + when not necessary (Nick Craig-Wood) + - Fix IO Error opening a file with O_CREATE|O_RDONLY in + --vfs-cache-mode not full (Nick Craig-Wood) +- Crypt + - Fix compress wrapping crypt giving upload errors (Nick + Craig-Wood) +- Azure Blob + - Port to new SDK (Nick Craig-Wood) + - Revamp authentication to include all methods and docs (Nick + Craig-Wood) + - Port old authentication methods to new SDK (Nick Craig-Wood, + Brad Ackerman) + - Thanks to Stonebranch for sponsoring this work. 
+ - Add --azureblob-no-check-container to assume container exists + (Nick Craig-Wood) + - Add --use-server-modtime support (Abdullah Saglam) + - Add support for custom upload headers (rkettelerij) + - Allow emulator account/key override (Roel Arents) + - Support simple "environment credentials" (Nathaniel Wesley + Filardo) + - Ignore AuthorizationFailure when trying to create a + container (Nick Craig-Wood) +- Box + - Added note on Box API rate limits (Ole Frost) +- Drive + - Handle shared drives with leading/trailing space in name + (related to) (albertony) +- FTP + - Update help text of implicit/explicit TLS options to refer to + FTPS instead of FTP (ycdtosa) + - Improve performance to speed up --files-from and NewObject + (Anthony Pessy) +- HTTP + - Parse GET responses when no_head is set (Arnie97) + - Do not update object size based on Range requests (Arnie97) + - Support Content-Range response header (Arnie97) +- Onedrive + - Document workaround for shared with me files (vanplus) +- S3 + - Add Liara LOS to provider list (MohammadReza) + - Add DigitalOcean Spaces regions sfo3, fra1, syd1 (Jack) + - Avoid privileged GetBucketLocation to resolve s3 region (Anthony + Pessy) + - Stop setting object and bucket ACL to private if it is an empty + string (Philip Harvey) + - If bucket or object ACL is empty string then don't add + X-Amz-Acl: header (Nick Craig-Wood) + - Reduce memory consumption for s3 objects (Erik Agterdenbos) + - Fix listing loop when using v2 listing on v1 server (Nick + Craig-Wood) + - Fix nil pointer exception when using Versions (Nick Craig-Wood) + - Fix excess memory usage when using versions (Nick Craig-Wood) + - Ignore versionIDs from uploads unless using --s3-versions or + --s3-versions-at (Nick Craig-Wood) +- SFTP + - Add configuration options to set ssh Ciphers / MACs / + KeyExchange (dgouju) + - Auto-detect shell type for fish (albertony) + - Fix NewObject with leading / (Nick Craig-Wood) +- Smb + - Fix issue where spurious dot 
directory is created (albertony) +- Storj + - Implement server side Copy (Kaloyan Raev) + +v1.60.1 - 2022-11-17 + +See commits + +- Bug Fixes + - lib/cache: Fix alias backend shutting down too soon (Nick + Craig-Wood) + - wasm: Fix walltime link error by adding up-to-date wasm_exec.js + (João Henrique Franco) + - docs + - Update faq.md with bisync (Samuel Johnson) + - Corrected download links in windows install docs + (coultonluke) + - Add direct download link for windows arm64 (albertony) + - Remove link to rclone slack as it is no longer supported + (Nick Craig-Wood) + - Faq: how to use a proxy server that requires a username and + password (asdffdsazqqq) + - Oracle-object-storage: doc fix (Manoj Ghosh) + - Fix typo remove in rclone_serve_restic command (Joda Stößer) + - Fix character that was incorrectly interpreted as markdown + (Clément Notin) +- VFS + - Fix deadlock caused by cache cleaner and upload finishing (Nick + Craig-Wood) +- Local + - Clean absolute paths (albertony) + - Fix -L/--copy-links with filters missing directories (Nick + Craig-Wood) +- Mailru + - Note that an app password is now needed (Nick Craig-Wood) + - Allow timestamps to be before the epoch 1970-01-01 (Nick + Craig-Wood) +- S3 + - Add provider quirk --s3-might-gzip to fix corrupted on transfer: + sizes differ (Nick Craig-Wood) + - Allow Storj to server side copy since it seems to work now (Nick + Craig-Wood) + - Fix for unchecked err value in s3 listv2 (Aaron Gokaslan) + - Add additional Wasabi locations (techknowlogick) +- Smb + - Fix Failed to sync: context canceled at the end of syncs (Nick + Craig-Wood) +- WebDAV + - Fix Move/Copy/DirMove when using -server-side-across-configs + (Nick Craig-Wood) + v1.60.0 - 2022-10-21 See commits @@ -45966,9 +47077,7 @@ metadata, which breaks the desired 1:1 mapping of files to objects. Can rclone do bi-directional sync? -No, not at present. rclone only does uni-directional sync from A -> B. 
-It may do in the future though since it has all the primitives - it just -requires writing the algorithm to do it. +Yes, since rclone v1.58.0, bidirectional cloud sync is available. Can I use rclone with an HTTP proxy? @@ -45993,6 +47102,13 @@ set all possibilities. So, on Linux, you may end up with code similar to export HTTP_PROXY=$http_proxy export HTTPS_PROXY=$http_proxy +Note: If the proxy server requires a username and password, then use + + export http_proxy=http://username:password@proxyserver:12345 + export https_proxy=$http_proxy + export HTTP_PROXY=$http_proxy + export HTTPS_PROXY=$http_proxy + The NO_PROXY allows you to disable the proxy for specific hosts. Hosts must be comma separated, and can contain domains or parts. For instance "foo.com" also matches "bar.foo.com". @@ -46363,6 +47479,7 @@ email addresses removed from here need to be addeed to bin/.ignore-emails to mak - Jay dev@jaygoel.com - andrea rota a@xelera.eu - nicolov nicolov@users.noreply.github.com +- Matt Joiner anacrolix@gmail.com - Dario Guzik dario@guzik.com.ar - qip qip@users.noreply.github.com - yair@unicorn yair@unicorn @@ -46787,6 +47904,28 @@ email addresses removed from here need to be addeed to bin/.ignore-emails to mak - Manoj Ghosh manoj.ghosh@oracle.com - Tom Mombourquette tom@devnode.com - Robert Newson rnewson@apache.org +- Samuel Johnson esamueljohnson@gmail.com +- coultonluke luke@luke.org.uk +- Anthony Pessy anthony@cogniteev.com +- Philip Harvey pharvey@battelleecology.org +- dgouju dgouju@users.noreply.github.com +- Clément Notin clement.notin@gmail.com +- x3-apptech 66947598+x3-apptech@users.noreply.github.com +- Arnie97 arnie97@gmail.com +- Roel Arents 2691308+roelarents@users.noreply.github.com +- Aaron Gokaslan aaronGokaslan@gmail.com +- techknowlogick matti@mdranta.net +- rkettelerij richard@mindloops.nl +- Kamui fin-kamui@pm.me +- asdffdsazqqq 90116442+asdffdsazqqq@users.noreply.github.com +- Nathaniel Wesley Filardo nfilardo@microsoft.com +- ycdtosa 
ycdtosa@users.noreply.github.com +- Erik Agterdenbos agterdenbos@users.noreply.github.com +- Kevin Verstaen 48050031+kverstae@users.noreply.github.com +- MohammadReza mrvashian@gmail.com +- vanplus 60313789+vanplus@users.noreply.github.com +- Jack 16779171+jkpe@users.noreply.github.com +- Abdullah Saglam abdullah.saglam@stonebranch.com Contact the rclone project diff --git a/docs/content/changelog.md b/docs/content/changelog.md index 2c844f256..2ba5494ea 100644 --- a/docs/content/changelog.md +++ b/docs/content/changelog.md @@ -5,6 +5,101 @@ description: "Rclone Changelog" # Changelog +## v1.61.0 - 2022-12-20 + +[See commits](https://github.com/rclone/rclone/compare/v1.60.0...v1.61.0) + +* New backends + * New S3 providers + * [Liara LOS](/s3/#liara-cloud) (MohammadReza) +* New Features + * build: Add vulnerability testing using govulncheck (albertony) + * cmd: Enable `SIGINFO` (Ctrl-T) handler on FreeBSD, NetBSD, OpenBSD and Dragonfly BSD (x3-apptech) + * config: Add [config/setpath](/rc/#config-setpath) for setting config path via rc/librclone (Nick Craig-Wood) + * dedupe + * Count Checks in the stats while scanning for duplicates (Nick Craig-Wood) + * Make dedupe obey the filters (Nick Craig-Wood) + * dlna: Properly attribute code used from https://github.com/anacrolix/dms (Nick Craig-Wood) + * docs + * Add minimum versions and status badges to backend and command docs (Nick Craig-Wood, albertony) + * Remote names may not start or end with space (albertony) + * filter: Add metadata filters [--metadata-include/exclude/filter](/filtering/#metadata) and friends (Nick Craig-Wood) + * fs + * Make all duration flags take `y`, `M`, `w`, `d` etc suffixes (Nick Craig-Wood) + * Add global flag `--color` to control terminal colors (Kevin Verstaen) + * fspath: Allow unicode numbers and letters in remote names (albertony) + * lib/file: Improve error message for creating dir on non-existent network host on windows (albertony) + * lib/http: Finish port of rclone servers to 
`lib/http` (Tom Mombourquette, Nick Craig-Wood) + * lib/oauthutil: Improved usability of config flows needing web browser (Ole Frost) + * ncdu + * Add support for modification time (albertony) + * Fallback to sort by name also for sort by average size (albertony) + * Rework to use tcell directly instead of the termbox wrapper (eNV25) + * rc: Add commands to set [GC Percent](/rc/#debug-set-gc-percent) & [Memory Limit](/rc/#debug-set-soft-memory-limit) (go 1.19+) (Anagh Kumar Baranwal) + * rcat: Preserve metadata when Copy falls back to Rcat (Nick Craig-Wood) + * rcd: Refactor rclone rc server to use `lib/http` (Nick Craig-Wood) + * rcserver: Avoid generating default credentials with htpasswd (Kamui) + * restic: Refactor to use `lib/http` (Nolan Woods) + * serve http: Support unix sockets and multiple listeners (Tom Mombourquette) + * serve webdav: Refactor to use `lib/http` (Nick Craig-Wood) + * test: Replace defer cleanup with `t.Cleanup` (Eng Zer Jun) + * test memory: Read metadata if `-M` flag is specified (Nick Craig-Wood) + * wasm: Comply with `wasm_exec.js` licence terms (Matthew Vernon) +* Bug Fixes + * build: Update `golang.org/x/net/http2` to fix GO-2022-1144 (Nick Craig-Wood) + * restic: Fix typo in docs 'remove' should be 'remote' (asdffdsazqqq) + * serve dlna: Fix panic: Logger uninitialized. 
(Nick Craig-Wood) +* Mount + * Update cgofuse for FUSE-T support for mounting volumes on Mac (Nick Craig-Wood) +* VFS + * Windows: fix slow opening of exe files by not truncating files when not necessary (Nick Craig-Wood) + * Fix IO Error opening a file with `O_CREATE|O_RDONLY` in `--vfs-cache-mode` not full (Nick Craig-Wood) +* Crypt + * Fix compress wrapping crypt giving upload errors (Nick Craig-Wood) +* Azure Blob + * Port to new SDK (Nick Craig-Wood) + * Revamp authentication to include all methods and docs (Nick Craig-Wood) + * Port old authentication methods to new SDK (Nick Craig-Wood, Brad Ackerman) + * Thanks to [Stonebranch](https://www.stonebranch.com/) for sponsoring this work. + * Add `--azureblob-no-check-container` to assume container exists (Nick Craig-Wood) + * Add `--use-server-modtime` support (Abdullah Saglam) + * Add support for custom upload headers (rkettelerij) + * Allow emulator account/key override (Roel Arents) + * Support simple "environment credentials" (Nathaniel Wesley Filardo) + * Ignore `AuthorizationFailure` when trying to create a container (Nick Craig-Wood) +* Box + * Added note on Box API rate limits (Ole Frost) +* Drive + * Handle shared drives with leading/trailing space in name (related to) (albertony) +* FTP + * Update help text of implicit/explicit TLS options to refer to FTPS instead of FTP (ycdtosa) + * Improve performance to speed up `--files-from` and `NewObject` (Anthony Pessy) +* HTTP + * Parse GET responses when `no_head` is set (Arnie97) + * Do not update object size based on `Range` requests (Arnie97) + * Support `Content-Range` response header (Arnie97) +* Onedrive + * Document workaround for shared with me files (vanplus) +* S3 + * Add Liara LOS to provider list (MohammadReza) + * Add DigitalOcean Spaces regions `sfo3`, `fra1`, `syd1` (Jack) + * Avoid privileged `GetBucketLocation` to resolve s3 region (Anthony Pessy) + * Stop setting object and bucket ACL to `private` if it is an empty string (Philip 
Harvey) + * If bucket or object ACL is empty string then don't add `X-Amz-Acl:` header (Nick Craig-Wood) + * Reduce memory consumption for s3 objects (Erik Agterdenbos) + * Fix listing loop when using v2 listing on v1 server (Nick Craig-Wood) + * Fix nil pointer exception when using Versions (Nick Craig-Wood) + * Fix excess memory usage when using versions (Nick Craig-Wood) + * Ignore versionIDs from uploads unless using `--s3-versions` or `--s3-versions-at` (Nick Craig-Wood) +* SFTP + * Add configuration options to set ssh Ciphers / MACs / KeyExchange (dgouju) + * Auto-detect shell type for fish (albertony) + * Fix NewObject with leading / (Nick Craig-Wood) +* Smb + * Fix issue where spurious dot directory is created (albertony) +* Storj + * Implement server side Copy (Kaloyan Raev) + ## v1.60.1 - 2022-11-17 [See commits](https://github.com/rclone/rclone/compare/v1.60.0...v1.60.1) diff --git a/docs/content/commands/rclone_about.md b/docs/content/commands/rclone_about.md index 07a01d410..4464aa80a 100644 --- a/docs/content/commands/rclone_about.md +++ b/docs/content/commands/rclone_about.md @@ -3,6 +3,7 @@ title: "rclone about" description: "Get quota information from the remote." slug: rclone_about url: /commands/rclone_about/ +versionIntroduced: v1.41 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/about/ and as part of making a release run "make commanddocs" --- # rclone about diff --git a/docs/content/commands/rclone_authorize.md b/docs/content/commands/rclone_authorize.md index 4e8fd0ba0..3620829eb 100644 --- a/docs/content/commands/rclone_authorize.md +++ b/docs/content/commands/rclone_authorize.md @@ -3,6 +3,7 @@ title: "rclone authorize" description: "Remote authorization." 
slug: rclone_authorize url: /commands/rclone_authorize/ +versionIntroduced: v1.27 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/authorize/ and as part of making a release run "make commanddocs" --- # rclone authorize diff --git a/docs/content/commands/rclone_backend.md b/docs/content/commands/rclone_backend.md index 9887db60d..8e29f42ed 100644 --- a/docs/content/commands/rclone_backend.md +++ b/docs/content/commands/rclone_backend.md @@ -3,6 +3,7 @@ title: "rclone backend" description: "Run a backend-specific command." slug: rclone_backend url: /commands/rclone_backend/ +versionIntroduced: v1.52 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/backend/ and as part of making a release run "make commanddocs" --- # rclone backend diff --git a/docs/content/commands/rclone_bisync.md b/docs/content/commands/rclone_bisync.md index 4a7127329..dacda9937 100644 --- a/docs/content/commands/rclone_bisync.md +++ b/docs/content/commands/rclone_bisync.md @@ -3,6 +3,7 @@ title: "rclone bisync" description: "Perform bidirectional synchronization between two paths." slug: rclone_bisync url: /commands/rclone_bisync/ +versionIntroduced: v1.58 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/bisync/ and as part of making a release run "make commanddocs" --- # rclone bisync diff --git a/docs/content/commands/rclone_cat.md b/docs/content/commands/rclone_cat.md index 0b3b40f71..6ad41423d 100644 --- a/docs/content/commands/rclone_cat.md +++ b/docs/content/commands/rclone_cat.md @@ -3,6 +3,7 @@ title: "rclone cat" description: "Concatenates any files and sends them to stdout." 
slug: rclone_cat url: /commands/rclone_cat/ +versionIntroduced: v1.33 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/cat/ and as part of making a release run "make commanddocs" --- # rclone cat diff --git a/docs/content/commands/rclone_checksum.md b/docs/content/commands/rclone_checksum.md index e285358bd..661dab6b2 100644 --- a/docs/content/commands/rclone_checksum.md +++ b/docs/content/commands/rclone_checksum.md @@ -3,6 +3,7 @@ title: "rclone checksum" description: "Checks the files in the source against a SUM file." slug: rclone_checksum url: /commands/rclone_checksum/ +versionIntroduced: v1.56 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/checksum/ and as part of making a release run "make commanddocs" --- # rclone checksum diff --git a/docs/content/commands/rclone_cleanup.md b/docs/content/commands/rclone_cleanup.md index 680b4f981..0b7b0b189 100644 --- a/docs/content/commands/rclone_cleanup.md +++ b/docs/content/commands/rclone_cleanup.md @@ -3,6 +3,7 @@ title: "rclone cleanup" description: "Clean up the remote if possible." slug: rclone_cleanup url: /commands/rclone_cleanup/ +versionIntroduced: v1.31 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/cleanup/ and as part of making a release run "make commanddocs" --- # rclone cleanup diff --git a/docs/content/commands/rclone_config.md b/docs/content/commands/rclone_config.md index f6ca5e419..186d90d26 100644 --- a/docs/content/commands/rclone_config.md +++ b/docs/content/commands/rclone_config.md @@ -3,6 +3,7 @@ title: "rclone config" description: "Enter an interactive configuration session." 
slug: rclone_config url: /commands/rclone_config/ +versionIntroduced: v1.39 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/ and as part of making a release run "make commanddocs" --- # rclone config diff --git a/docs/content/commands/rclone_config_create.md b/docs/content/commands/rclone_config_create.md index 8d12ea86b..a923b246d 100644 --- a/docs/content/commands/rclone_config_create.md +++ b/docs/content/commands/rclone_config_create.md @@ -3,6 +3,7 @@ title: "rclone config create" description: "Create a new remote with name, type and options." slug: rclone_config_create url: /commands/rclone_config_create/ +versionIntroduced: v1.39 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/create/ and as part of making a release run "make commanddocs" --- # rclone config create @@ -57,7 +58,7 @@ This will look something like (some irrelevant detail removed): "State": "*oauth-islocal,teamdrive,,", "Option": { "Name": "config_is_local", - "Help": "Use auto config?\n * Say Y if not sure\n * Say N if you are working on a remote or headless machine\n", + "Help": "Use web browser to automatically authenticate rclone with remote?\n * Say Y if the machine running rclone has a web browser you can use\n * Say N if running rclone on a (remote) machine without web browser access\nIf not sure try Y. If Y failed, try N.\n", "Default": true, "Examples": [ { diff --git a/docs/content/commands/rclone_config_delete.md b/docs/content/commands/rclone_config_delete.md index 6b82e8006..c58bed4e4 100644 --- a/docs/content/commands/rclone_config_delete.md +++ b/docs/content/commands/rclone_config_delete.md @@ -3,6 +3,7 @@ title: "rclone config delete" description: "Delete an existing remote." 
slug: rclone_config_delete url: /commands/rclone_config_delete/ +versionIntroduced: v1.39 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/delete/ and as part of making a release run "make commanddocs" --- # rclone config delete diff --git a/docs/content/commands/rclone_config_dump.md b/docs/content/commands/rclone_config_dump.md index 5f471c40b..e0119936d 100644 --- a/docs/content/commands/rclone_config_dump.md +++ b/docs/content/commands/rclone_config_dump.md @@ -3,6 +3,7 @@ title: "rclone config dump" description: "Dump the config file as JSON." slug: rclone_config_dump url: /commands/rclone_config_dump/ +versionIntroduced: v1.39 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/dump/ and as part of making a release run "make commanddocs" --- # rclone config dump diff --git a/docs/content/commands/rclone_config_file.md b/docs/content/commands/rclone_config_file.md index 0b289a4e5..8585b77f9 100644 --- a/docs/content/commands/rclone_config_file.md +++ b/docs/content/commands/rclone_config_file.md @@ -3,6 +3,7 @@ title: "rclone config file" description: "Show path of configuration file in use." slug: rclone_config_file url: /commands/rclone_config_file/ +versionIntroduced: v1.38 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/file/ and as part of making a release run "make commanddocs" --- # rclone config file diff --git a/docs/content/commands/rclone_config_password.md b/docs/content/commands/rclone_config_password.md index ee7fd3d3d..f69301811 100644 --- a/docs/content/commands/rclone_config_password.md +++ b/docs/content/commands/rclone_config_password.md @@ -3,6 +3,7 @@ title: "rclone config password" description: "Update password in an existing remote." 
slug: rclone_config_password url: /commands/rclone_config_password/ +versionIntroduced: v1.39 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/password/ and as part of making a release run "make commanddocs" --- # rclone config password diff --git a/docs/content/commands/rclone_config_paths.md b/docs/content/commands/rclone_config_paths.md index 7171828dd..7762ce090 100644 --- a/docs/content/commands/rclone_config_paths.md +++ b/docs/content/commands/rclone_config_paths.md @@ -3,6 +3,7 @@ title: "rclone config paths" description: "Show paths used for configuration, cache, temp etc." slug: rclone_config_paths url: /commands/rclone_config_paths/ +versionIntroduced: v1.57 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/paths/ and as part of making a release run "make commanddocs" --- # rclone config paths diff --git a/docs/content/commands/rclone_config_providers.md b/docs/content/commands/rclone_config_providers.md index ed89ea5fe..43441af08 100644 --- a/docs/content/commands/rclone_config_providers.md +++ b/docs/content/commands/rclone_config_providers.md @@ -3,6 +3,7 @@ title: "rclone config providers" description: "List in JSON format all the providers and options." slug: rclone_config_providers url: /commands/rclone_config_providers/ +versionIntroduced: v1.39 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/providers/ and as part of making a release run "make commanddocs" --- # rclone config providers diff --git a/docs/content/commands/rclone_config_show.md b/docs/content/commands/rclone_config_show.md index 97b818f36..beab56349 100644 --- a/docs/content/commands/rclone_config_show.md +++ b/docs/content/commands/rclone_config_show.md @@ -3,6 +3,7 @@ title: "rclone config show" description: "Print (decrypted) config file, or the config for a single remote." 
slug: rclone_config_show url: /commands/rclone_config_show/ +versionIntroduced: v1.38 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/show/ and as part of making a release run "make commanddocs" --- # rclone config show diff --git a/docs/content/commands/rclone_config_touch.md b/docs/content/commands/rclone_config_touch.md index ebea6e257..d96c37027 100644 --- a/docs/content/commands/rclone_config_touch.md +++ b/docs/content/commands/rclone_config_touch.md @@ -3,6 +3,7 @@ title: "rclone config touch" description: "Ensure configuration file exists." slug: rclone_config_touch url: /commands/rclone_config_touch/ +versionIntroduced: v1.56 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/touch/ and as part of making a release run "make commanddocs" --- # rclone config touch diff --git a/docs/content/commands/rclone_config_update.md b/docs/content/commands/rclone_config_update.md index 468a68307..9e3fa9a38 100644 --- a/docs/content/commands/rclone_config_update.md +++ b/docs/content/commands/rclone_config_update.md @@ -3,6 +3,7 @@ title: "rclone config update" description: "Update options in an existing remote." slug: rclone_config_update url: /commands/rclone_config_update/ +versionIntroduced: v1.39 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/update/ and as part of making a release run "make commanddocs" --- # rclone config update @@ -57,7 +58,7 @@ This will look something like (some irrelevant detail removed): "State": "*oauth-islocal,teamdrive,,", "Option": { "Name": "config_is_local", - "Help": "Use auto config?\n * Say Y if not sure\n * Say N if you are working on a remote or headless machine\n", + "Help": "Use web browser to automatically authenticate rclone with remote?\n * Say Y if the machine running rclone has a web browser you can use\n * Say N if running rclone on a (remote) machine without web browser access\nIf not sure try Y. 
If Y failed, try N.\n", "Default": true, "Examples": [ { diff --git a/docs/content/commands/rclone_copyto.md b/docs/content/commands/rclone_copyto.md index a5cfa974e..bfd8b5e72 100644 --- a/docs/content/commands/rclone_copyto.md +++ b/docs/content/commands/rclone_copyto.md @@ -3,6 +3,7 @@ title: "rclone copyto" description: "Copy files from source to dest, skipping identical files." slug: rclone_copyto url: /commands/rclone_copyto/ +versionIntroduced: v1.35 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/copyto/ and as part of making a release run "make commanddocs" --- # rclone copyto diff --git a/docs/content/commands/rclone_copyurl.md b/docs/content/commands/rclone_copyurl.md index 1188f0647..2d13cd9a8 100644 --- a/docs/content/commands/rclone_copyurl.md +++ b/docs/content/commands/rclone_copyurl.md @@ -3,6 +3,7 @@ title: "rclone copyurl" description: "Copy url content to dest." slug: rclone_copyurl url: /commands/rclone_copyurl/ +versionIntroduced: v1.43 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/copyurl/ and as part of making a release run "make commanddocs" --- # rclone copyurl diff --git a/docs/content/commands/rclone_cryptcheck.md b/docs/content/commands/rclone_cryptcheck.md index 3e81e53e9..21528b029 100644 --- a/docs/content/commands/rclone_cryptcheck.md +++ b/docs/content/commands/rclone_cryptcheck.md @@ -3,6 +3,7 @@ title: "rclone cryptcheck" description: "Cryptcheck checks the integrity of a crypted remote." 
slug: rclone_cryptcheck url: /commands/rclone_cryptcheck/ +versionIntroduced: v1.36 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/cryptcheck/ and as part of making a release run "make commanddocs" --- # rclone cryptcheck diff --git a/docs/content/commands/rclone_cryptdecode.md b/docs/content/commands/rclone_cryptdecode.md index c4836ec8d..11be5a7d1 100644 --- a/docs/content/commands/rclone_cryptdecode.md +++ b/docs/content/commands/rclone_cryptdecode.md @@ -3,6 +3,7 @@ title: "rclone cryptdecode" description: "Cryptdecode returns unencrypted file names." slug: rclone_cryptdecode url: /commands/rclone_cryptdecode/ +versionIntroduced: v1.38 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/cryptdecode/ and as part of making a release run "make commanddocs" --- # rclone cryptdecode diff --git a/docs/content/commands/rclone_dedupe.md b/docs/content/commands/rclone_dedupe.md index 6b77f17cb..a7a711b45 100644 --- a/docs/content/commands/rclone_dedupe.md +++ b/docs/content/commands/rclone_dedupe.md @@ -3,6 +3,7 @@ title: "rclone dedupe" description: "Interactively find duplicate filenames and delete/rename them." slug: rclone_dedupe url: /commands/rclone_dedupe/ +versionIntroduced: v1.27 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/dedupe/ and as part of making a release run "make commanddocs" --- # rclone dedupe diff --git a/docs/content/commands/rclone_delete.md b/docs/content/commands/rclone_delete.md index 09076a46a..08ec20f98 100644 --- a/docs/content/commands/rclone_delete.md +++ b/docs/content/commands/rclone_delete.md @@ -3,6 +3,7 @@ title: "rclone delete" description: "Remove the files in path." 
slug: rclone_delete url: /commands/rclone_delete/ +versionIntroduced: v1.27 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/delete/ and as part of making a release run "make commanddocs" --- # rclone delete diff --git a/docs/content/commands/rclone_deletefile.md b/docs/content/commands/rclone_deletefile.md index 865498d1f..0cdaaf860 100644 --- a/docs/content/commands/rclone_deletefile.md +++ b/docs/content/commands/rclone_deletefile.md @@ -3,6 +3,7 @@ title: "rclone deletefile" description: "Remove a single file from remote." slug: rclone_deletefile url: /commands/rclone_deletefile/ +versionIntroduced: v1.42 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/deletefile/ and as part of making a release run "make commanddocs" --- # rclone deletefile diff --git a/docs/content/commands/rclone_genautocomplete.md b/docs/content/commands/rclone_genautocomplete.md index 3838dda4a..efaa38b07 100644 --- a/docs/content/commands/rclone_genautocomplete.md +++ b/docs/content/commands/rclone_genautocomplete.md @@ -3,6 +3,7 @@ title: "rclone genautocomplete" description: "Output completion script for a given shell." slug: rclone_genautocomplete url: /commands/rclone_genautocomplete/ +versionIntroduced: v1.33 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/genautocomplete/ and as part of making a release run "make commanddocs" --- # rclone genautocomplete diff --git a/docs/content/commands/rclone_gendocs.md b/docs/content/commands/rclone_gendocs.md index b5d8bef45..5389340c6 100644 --- a/docs/content/commands/rclone_gendocs.md +++ b/docs/content/commands/rclone_gendocs.md @@ -3,6 +3,7 @@ title: "rclone gendocs" description: "Output markdown docs for rclone to the directory supplied." 
slug: rclone_gendocs url: /commands/rclone_gendocs/ +versionIntroduced: v1.33 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/gendocs/ and as part of making a release run "make commanddocs" --- # rclone gendocs diff --git a/docs/content/commands/rclone_hashsum.md b/docs/content/commands/rclone_hashsum.md index f9b96e6a4..e1a78e3be 100644 --- a/docs/content/commands/rclone_hashsum.md +++ b/docs/content/commands/rclone_hashsum.md @@ -3,6 +3,7 @@ title: "rclone hashsum" description: "Produces a hashsum file for all the objects in the path." slug: rclone_hashsum url: /commands/rclone_hashsum/ +versionIntroduced: v1.41 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/hashsum/ and as part of making a release run "make commanddocs" --- # rclone hashsum diff --git a/docs/content/commands/rclone_link.md b/docs/content/commands/rclone_link.md index 00a2d0202..456849873 100644 --- a/docs/content/commands/rclone_link.md +++ b/docs/content/commands/rclone_link.md @@ -3,6 +3,7 @@ title: "rclone link" description: "Generate public link to file/folder." slug: rclone_link url: /commands/rclone_link/ +versionIntroduced: v1.41 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/link/ and as part of making a release run "make commanddocs" --- # rclone link diff --git a/docs/content/commands/rclone_listremotes.md b/docs/content/commands/rclone_listremotes.md index 54e4317c8..fdf20b88c 100644 --- a/docs/content/commands/rclone_listremotes.md +++ b/docs/content/commands/rclone_listremotes.md @@ -3,6 +3,7 @@ title: "rclone listremotes" description: "List all the remotes in the config file." 
slug: rclone_listremotes url: /commands/rclone_listremotes/ +versionIntroduced: v1.34 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/listremotes/ and as part of making a release run "make commanddocs" --- # rclone listremotes diff --git a/docs/content/commands/rclone_lsf.md b/docs/content/commands/rclone_lsf.md index 0a34277fa..8567cbc9e 100644 --- a/docs/content/commands/rclone_lsf.md +++ b/docs/content/commands/rclone_lsf.md @@ -3,6 +3,7 @@ title: "rclone lsf" description: "List directories and objects in remote:path formatted for parsing." slug: rclone_lsf url: /commands/rclone_lsf/ +versionIntroduced: v1.40 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/lsf/ and as part of making a release run "make commanddocs" --- # rclone lsf diff --git a/docs/content/commands/rclone_lsjson.md b/docs/content/commands/rclone_lsjson.md index e3cac5d21..fbb979363 100644 --- a/docs/content/commands/rclone_lsjson.md +++ b/docs/content/commands/rclone_lsjson.md @@ -3,6 +3,7 @@ title: "rclone lsjson" description: "List directories and objects in the path in JSON format." 
slug: rclone_lsjson url: /commands/rclone_lsjson/ +versionIntroduced: v1.37 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/lsjson/ and as part of making a release run "make commanddocs" --- # rclone lsjson @@ -120,6 +121,7 @@ rclone lsjson remote:path [flags] --hash Include hashes in the output (may take longer) --hash-type stringArray Show only this hash type (may be repeated) -h, --help help for lsjson + -M, --metadata Add metadata to the listing --no-mimetype Don't read the mime type (can speed things up) --no-modtime Don't read the modification time (can speed things up) --original Show the ID of the underlying Object diff --git a/docs/content/commands/rclone_lsl.md b/docs/content/commands/rclone_lsl.md index f493916a9..b7419e7ac 100644 --- a/docs/content/commands/rclone_lsl.md +++ b/docs/content/commands/rclone_lsl.md @@ -3,6 +3,7 @@ title: "rclone lsl" description: "List the objects in path with modification time, size and path." slug: rclone_lsl url: /commands/rclone_lsl/ +versionIntroduced: v1.02 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/lsl/ and as part of making a release run "make commanddocs" --- # rclone lsl diff --git a/docs/content/commands/rclone_md5sum.md b/docs/content/commands/rclone_md5sum.md index 9cd53cad0..c80506429 100644 --- a/docs/content/commands/rclone_md5sum.md +++ b/docs/content/commands/rclone_md5sum.md @@ -3,6 +3,7 @@ title: "rclone md5sum" description: "Produces an md5sum file for all the objects in the path." 
slug: rclone_md5sum url: /commands/rclone_md5sum/ +versionIntroduced: v1.02 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/md5sum/ and as part of making a release run "make commanddocs" --- # rclone md5sum diff --git a/docs/content/commands/rclone_mount.md b/docs/content/commands/rclone_mount.md index b322b23f8..2c8fdb169 100644 --- a/docs/content/commands/rclone_mount.md +++ b/docs/content/commands/rclone_mount.md @@ -3,6 +3,7 @@ title: "rclone mount" description: "Mount the remote as file system on a mountpoint." slug: rclone_mount url: /commands/rclone_mount/ +versionIntroduced: v1.33 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/mount/ and as part of making a release run "make commanddocs" --- # rclone mount @@ -722,14 +723,14 @@ rclone mount remote:path /path/to/mountpoint [flags] --allow-other Allow access to other users (not supported on Windows) --allow-root Allow access to root user (not supported on Windows) --async-read Use asynchronous reads (not supported on Windows) (default true) - --attr-timeout duration Time for which file/directory attributes are cached (default 1s) + --attr-timeout Duration Time for which file/directory attributes are cached (default 1s) --daemon Run mount in background and exit parent process (as background output is suppressed, use --log-file with --log-format=pid,... 
to monitor) (not supported on Windows) - --daemon-timeout duration Time limit for rclone to respond to kernel (not supported on Windows) - --daemon-wait duration Time to wait for ready mount from daemon (maximum time on Linux, constant sleep time on OSX/BSD) (not supported on Windows) (default 1m0s) + --daemon-timeout Duration Time limit for rclone to respond to kernel (not supported on Windows) (default 0s) + --daemon-wait Duration Time to wait for ready mount from daemon (maximum time on Linux, constant sleep time on OSX/BSD) (not supported on Windows) (default 1m0s) --debug-fuse Debug the FUSE internals - needs -v --default-permissions Makes kernel enforce access control based on the file mode (not supported on Windows) --devname string Set the device name - default is remote:path - --dir-cache-time duration Time to cache directory entries for (default 5m0s) + --dir-cache-time Duration Time to cache directory entries for (default 5m0s) --dir-perms FileMode Directory permissions (default 0777) --file-perms FileMode File permissions (default 0666) --fuse-flag stringArray Flags or arguments to be passed direct to libfuse/WinFsp (repeat if required) @@ -743,24 +744,24 @@ rclone mount remote:path /path/to/mountpoint [flags] --noappledouble Ignore Apple Double (._) and .DS_Store files (supported on OSX only) (default true) --noapplexattr Ignore all "com.apple.*" extended attributes (supported on OSX only) -o, --option stringArray Option for libfuse/WinFsp (repeat if required) - --poll-interval duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) + --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --read-only Only allow read-only access --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) --umask int Override 
the permission bits set by the filesystem (not supported on Windows) (default 2) - --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s) + --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off) - --vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s) + --vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s) --vfs-case-insensitive If a file name not found, find a case insensitive match --vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off) --vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) - --vfs-read-wait duration Time to wait for in-sequence read before seeking (default 20ms) + --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-used-is-size rclone size Use the rclone size algorithm for Used size - --vfs-write-back duration Time to writeback files after last use when using cache (default 5s) - --vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s) + --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s) + --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) --volname string Set the volume name (supported on Windows and OSX only) --write-back-cache Makes kernel buffer writes before sending them to rclone 
(without this, writethrough caching is used) (not supported on Windows) ``` diff --git a/docs/content/commands/rclone_move.md b/docs/content/commands/rclone_move.md index ce97800a2..cd2e96fa3 100644 --- a/docs/content/commands/rclone_move.md +++ b/docs/content/commands/rclone_move.md @@ -3,6 +3,7 @@ title: "rclone move" description: "Move files from source to dest." slug: rclone_move url: /commands/rclone_move/ +versionIntroduced: v1.19 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/move/ and as part of making a release run "make commanddocs" --- # rclone move diff --git a/docs/content/commands/rclone_moveto.md b/docs/content/commands/rclone_moveto.md index 6f8488f83..4a236fe93 100644 --- a/docs/content/commands/rclone_moveto.md +++ b/docs/content/commands/rclone_moveto.md @@ -3,6 +3,7 @@ title: "rclone moveto" description: "Move file or directory from source to dest." slug: rclone_moveto url: /commands/rclone_moveto/ +versionIntroduced: v1.35 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/moveto/ and as part of making a release run "make commanddocs" --- # rclone moveto diff --git a/docs/content/commands/rclone_ncdu.md b/docs/content/commands/rclone_ncdu.md index 154eb219b..a92250cf2 100644 --- a/docs/content/commands/rclone_ncdu.md +++ b/docs/content/commands/rclone_ncdu.md @@ -3,6 +3,7 @@ title: "rclone ncdu" description: "Explore a remote with a text based user interface." slug: rclone_ncdu url: /commands/rclone_ncdu/ +versionIntroduced: v1.37 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/ncdu/ and as part of making a release run "make commanddocs" --- # rclone ncdu @@ -29,11 +30,12 @@ press '?' to toggle the help on and off. 
The supported keys are: ↑,↓ or k,j to Move →,l to enter ←,h to return - c toggle counts g toggle graph + c toggle counts a toggle average size in directory + m toggle modified time u toggle human-readable format - n,s,C,A sort by name,size,count,average size + n,s,C,A,M sort by name,size,count,asize,mtime d delete file/directory v select file/directory V enter visual select mode diff --git a/docs/content/commands/rclone_obscure.md b/docs/content/commands/rclone_obscure.md index 0a77772a8..06f556430 100644 --- a/docs/content/commands/rclone_obscure.md +++ b/docs/content/commands/rclone_obscure.md @@ -3,6 +3,7 @@ title: "rclone obscure" description: "Obscure password for use in the rclone config file." slug: rclone_obscure url: /commands/rclone_obscure/ +versionIntroduced: v1.36 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/obscure/ and as part of making a release run "make commanddocs" --- # rclone obscure diff --git a/docs/content/commands/rclone_rc.md b/docs/content/commands/rclone_rc.md index 4d2dbb07f..1cdd9b135 100644 --- a/docs/content/commands/rclone_rc.md +++ b/docs/content/commands/rclone_rc.md @@ -3,6 +3,7 @@ title: "rclone rc" description: "Run a command against a running rclone." slug: rclone_rc url: /commands/rclone_rc/ +versionIntroduced: v1.40 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/rc/ and as part of making a release run "make commanddocs" --- # rclone rc diff --git a/docs/content/commands/rclone_rcat.md b/docs/content/commands/rclone_rcat.md index 86d4c793e..a51eda851 100644 --- a/docs/content/commands/rclone_rcat.md +++ b/docs/content/commands/rclone_rcat.md @@ -3,6 +3,7 @@ title: "rclone rcat" description: "Copies standard input to file on remote." 
slug: rclone_rcat url: /commands/rclone_rcat/ +versionIntroduced: v1.38 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/rcat/ and as part of making a release run "make commanddocs" --- # rclone rcat diff --git a/docs/content/commands/rclone_rcd.md b/docs/content/commands/rclone_rcd.md index 7e3508d3f..d179b606a 100644 --- a/docs/content/commands/rclone_rcd.md +++ b/docs/content/commands/rclone_rcd.md @@ -3,6 +3,7 @@ title: "rclone rcd" description: "Run rclone listening to remote control commands only." slug: rclone_rcd url: /commands/rclone_rcd/ +versionIntroduced: v1.45 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/rcd/ and as part of making a release run "make commanddocs" --- # rclone rcd @@ -22,6 +23,101 @@ the browser when rclone is run. See the [rc documentation](/rc/) for more info on the rc flags. +## Server options + +Use `--addr` to specify which IP address and port the server should +listen on, eg `--addr 1.2.3.4:8000` or `--addr :8080` to listen to all +IPs. By default it only listens on localhost. You can use port +:0 to let the OS choose an available port. + +If you set `--addr` to listen on a public or LAN accessible IP address +then using Authentication is advised - see the next section for info. + +You can use a unix socket by setting the url to `unix:///path/to/socket` +or just by using an absolute path name. Note that unix sockets bypass the +authentication - this is expected to be done with file system permissions. + +`--addr` may be repeated to listen on multiple IPs/ports/sockets. + +`--server-read-timeout` and `--server-write-timeout` can be used to +control the timeouts on the server. Note that this is the total time +for a transfer. + +`--max-header-bytes` controls the maximum number of bytes the server will +accept in the HTTP header. + +`--baseurl` controls the URL prefix that rclone serves from. By default +rclone will serve from the root. 
If you used `--baseurl "/rclone"` then +rclone would serve from a URL starting with "/rclone/". This is +useful if you wish to proxy rclone serve. Rclone automatically +inserts leading and trailing "/" on `--baseurl`, so `--baseurl "rclone"`, +`--baseurl "/rclone"` and `--baseurl "/rclone/"` are all treated +identically. + +### TLS (SSL) + +By default this will serve over http. If you want you can serve over +https. You will need to supply the `--cert` and `--key` flags. +If you wish to do client side certificate validation then you will need to +supply `--client-ca` also. + +`--cert` should be a either a PEM encoded certificate or a concatenation +of that with the CA certificate. `--key` should be the PEM encoded +private key and `--client-ca` should be the PEM encoded client +certificate authority certificate. + +--min-tls-version is minimum TLS version that is acceptable. Valid + values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default + "tls1.0"). + +### Template + +`--template` allows a user to specify a custom markup template for HTTP +and WebDAV serve functions. The server exports the following markup +to be used within the template to server pages: + +| Parameter | Description | +| :---------- | :---------- | +| .Name | The full path of a file/directory. | +| .Title | Directory listing of .Name | +| .Sort | The current sort used. This is changeable via ?sort= parameter | +| | Sort Options: namedirfirst,name,size,time (default namedirfirst) | +| .Order | The current ordering used. This is changeable via ?order= parameter | +| | Order Options: asc,desc (default asc) | +| .Query | Currently unused. | +| .Breadcrumb | Allows for creating a relative navigation | +|-- .Link | The relative to the root link of the Text. | +|-- .Text | The Name of the directory. | +| .Entries | Information about a specific file/directory. | +|-- .URL | The 'url' of an entry. | +|-- .Leaf | Currently same as 'URL' but intended to be 'just' the name. 
| +|-- .IsDir | Boolean for if an entry is a directory or not. | +|-- .Size | Size in Bytes of the entry. | +|-- .ModTime | The UTC timestamp of an entry. | + +### Authentication + +By default this will serve files without needing a login. + +You can either use an htpasswd file which can take lots of users, or +set a single username and password with the `--user` and `--pass` flags. + +Use `--htpasswd /path/to/htpasswd` to provide an htpasswd file. This is +in standard apache format and supports MD5, SHA1 and BCrypt for basic +authentication. Bcrypt is recommended. + +To create an htpasswd file: + + touch htpasswd + htpasswd -B htpasswd user + htpasswd -B htpasswd anotherUser + +The password file can be updated while rclone is running. + +Use `--realm` to set the authentication realm. + +Use `--salt` to change the password hashing salt from the default. + ``` rclone rcd * [flags] diff --git a/docs/content/commands/rclone_rmdirs.md b/docs/content/commands/rclone_rmdirs.md index ba5dc56fc..22196b848 100644 --- a/docs/content/commands/rclone_rmdirs.md +++ b/docs/content/commands/rclone_rmdirs.md @@ -3,6 +3,7 @@ title: "rclone rmdirs" description: "Remove empty directories under the path." slug: rclone_rmdirs url: /commands/rclone_rmdirs/ +versionIntroduced: v1.35 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/rmdirs/ and as part of making a release run "make commanddocs" --- # rclone rmdirs diff --git a/docs/content/commands/rclone_selfupdate.md b/docs/content/commands/rclone_selfupdate.md index 2709c63f1..8a12883ad 100644 --- a/docs/content/commands/rclone_selfupdate.md +++ b/docs/content/commands/rclone_selfupdate.md @@ -3,6 +3,7 @@ title: "rclone selfupdate" description: "Update the rclone binary." 
slug: rclone_selfupdate url: /commands/rclone_selfupdate/ +versionIntroduced: v1.55 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/selfupdate/ and as part of making a release run "make commanddocs" --- # rclone selfupdate diff --git a/docs/content/commands/rclone_serve.md b/docs/content/commands/rclone_serve.md index 12d8141a8..ff08cc8b7 100644 --- a/docs/content/commands/rclone_serve.md +++ b/docs/content/commands/rclone_serve.md @@ -3,6 +3,7 @@ title: "rclone serve" description: "Serve a remote over a protocol." slug: rclone_serve url: /commands/rclone_serve/ +versionIntroduced: v1.39 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/serve/ and as part of making a release run "make commanddocs" --- # rclone serve diff --git a/docs/content/commands/rclone_serve_dlna.md b/docs/content/commands/rclone_serve_dlna.md index 0cb85400e..00718ef47 100644 --- a/docs/content/commands/rclone_serve_dlna.md +++ b/docs/content/commands/rclone_serve_dlna.md @@ -3,6 +3,7 @@ title: "rclone serve dlna" description: "Serve remote:path over DLNA" slug: rclone_serve_dlna url: /commands/rclone_serve_dlna/ +versionIntroduced: v1.46 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/serve/dlna/ and as part of making a release run "make commanddocs" --- # rclone serve dlna @@ -362,8 +363,8 @@ rclone serve dlna remote:path [flags] ``` --addr string The ip:port or :port to bind the DLNA http server to (default ":7879") - --announce-interval duration The interval between SSDP announcements (default 12m0s) - --dir-cache-time duration Time to cache directory entries for (default 5m0s) + --announce-interval Duration The interval between SSDP announcements (default 12m0s) + --dir-cache-time Duration Time to cache directory entries for (default 5m0s) --dir-perms FileMode Directory permissions (default 0777) --file-perms FileMode File permissions (default 0666) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) 
(default 1000) @@ -374,24 +375,24 @@ rclone serve dlna remote:path [flags] --no-checksum Don't compare checksums on up/download --no-modtime Don't read/write the modification time (can speed things up) --no-seek Don't allow seeking in files - --poll-interval duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) + --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --read-only Only allow read-only access --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) - --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s) + --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off) - --vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s) + --vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s) --vfs-case-insensitive If a file name not found, find a case insensitive match --vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off) --vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) - --vfs-read-wait duration Time to wait for in-sequence 
read before seeking (default 20ms) + --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-used-is-size rclone size Use the rclone size algorithm for Used size - --vfs-write-back duration Time to writeback files after last use when using cache (default 5s) - --vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s) + --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s) + --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) ``` See the [global flags page](/flags/) for global options not listed here. diff --git a/docs/content/commands/rclone_serve_docker.md b/docs/content/commands/rclone_serve_docker.md index b294968a0..a7d02fc03 100644 --- a/docs/content/commands/rclone_serve_docker.md +++ b/docs/content/commands/rclone_serve_docker.md @@ -3,6 +3,7 @@ title: "rclone serve docker" description: "Serve any remote on docker's volume plugin API." slug: rclone_serve_docker url: /commands/rclone_serve_docker/ +versionIntroduced: v1.56 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/serve/docker/ and as part of making a release run "make commanddocs" --- # rclone serve docker @@ -381,15 +382,15 @@ rclone serve docker [flags] --allow-other Allow access to other users (not supported on Windows) --allow-root Allow access to root user (not supported on Windows) --async-read Use asynchronous reads (not supported on Windows) (default true) - --attr-timeout duration Time for which file/directory attributes are cached (default 1s) + --attr-timeout Duration Time for which file/directory attributes are cached (default 1s) --base-dir string Base directory for volumes (default "/var/lib/docker-volumes/rclone") --daemon Run mount in background and exit parent process (as background output is suppressed, use --log-file with --log-format=pid,... 
to monitor) (not supported on Windows) - --daemon-timeout duration Time limit for rclone to respond to kernel (not supported on Windows) - --daemon-wait duration Time to wait for ready mount from daemon (maximum time on Linux, constant sleep time on OSX/BSD) (not supported on Windows) (default 1m0s) + --daemon-timeout Duration Time limit for rclone to respond to kernel (not supported on Windows) (default 0s) + --daemon-wait Duration Time to wait for ready mount from daemon (maximum time on Linux, constant sleep time on OSX/BSD) (not supported on Windows) (default 1m0s) --debug-fuse Debug the FUSE internals - needs -v --default-permissions Makes kernel enforce access control based on the file mode (not supported on Windows) --devname string Set the device name - default is remote:path - --dir-cache-time duration Time to cache directory entries for (default 5m0s) + --dir-cache-time Duration Time to cache directory entries for (default 5m0s) --dir-perms FileMode Directory permissions (default 0777) --file-perms FileMode File permissions (default 0666) --forget-state Skip restoring previous state @@ -405,26 +406,26 @@ rclone serve docker [flags] --noappledouble Ignore Apple Double (._) and .DS_Store files (supported on OSX only) (default true) --noapplexattr Ignore all "com.apple.*" extended attributes (supported on OSX only) -o, --option stringArray Option for libfuse/WinFsp (repeat if required) - --poll-interval duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) + --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --read-only Only allow read-only access --socket-addr string Address or absolute path (default: /run/docker/plugins/rclone.sock) --socket-gid int GID for unix socket (default: current process GID) (default 1000) --uid uint32 Override the uid 
field set by the filesystem (not supported on Windows) (default 1000) --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) - --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s) + --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off) - --vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s) + --vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s) --vfs-case-insensitive If a file name not found, find a case insensitive match --vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off) --vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) - --vfs-read-wait duration Time to wait for in-sequence read before seeking (default 20ms) + --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-used-is-size rclone size Use the rclone size algorithm for Used size - --vfs-write-back duration Time to writeback files after last use when using cache (default 5s) - --vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s) + --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s) + --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) --volname string Set the volume name (supported on Windows and 
OSX only) --write-back-cache Makes kernel buffer writes before sending them to rclone (without this, writethrough caching is used) (not supported on Windows) ``` diff --git a/docs/content/commands/rclone_serve_ftp.md b/docs/content/commands/rclone_serve_ftp.md index 3274a92c4..475e7bb21 100644 --- a/docs/content/commands/rclone_serve_ftp.md +++ b/docs/content/commands/rclone_serve_ftp.md @@ -3,6 +3,7 @@ title: "rclone serve ftp" description: "Serve remote:path over FTP." slug: rclone_serve_ftp url: /commands/rclone_serve_ftp/ +versionIntroduced: v1.44 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/serve/ftp/ and as part of making a release run "make commanddocs" --- # rclone serve ftp @@ -442,7 +443,7 @@ rclone serve ftp remote:path [flags] --addr string IPaddress:Port or :Port to bind server to (default "localhost:2121") --auth-proxy string A program to use to create the backend from the auth --cert string TLS PEM key (concatenation of certificate and CA certificate) - --dir-cache-time duration Time to cache directory entries for (default 5m0s) + --dir-cache-time Duration Time to cache directory entries for (default 5m0s) --dir-perms FileMode Directory permissions (default 0777) --file-perms FileMode File permissions (default 0666) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) @@ -453,26 +454,26 @@ rclone serve ftp remote:path [flags] --no-seek Don't allow seeking in files --pass string Password for authentication (empty value allow every password) --passive-port string Passive port range to use (default "30000-32000") - --poll-interval duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) + --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --public-ip string Public IP address to 
advertise for passive connections --read-only Only allow read-only access --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) --user string User name for authentication (default "anonymous") - --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s) + --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off) - --vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s) + --vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s) --vfs-case-insensitive If a file name not found, find a case insensitive match --vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off) --vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) - --vfs-read-wait duration Time to wait for in-sequence read before seeking (default 20ms) + --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-used-is-size rclone size Use the rclone size algorithm for Used size - --vfs-write-back duration Time to writeback files after last use when using cache (default 5s) - --vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s) + --vfs-write-back Duration Time to writeback files after last use when using cache 
(default 5s) + --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) ``` See the [global flags page](/flags/) for global options not listed here. diff --git a/docs/content/commands/rclone_serve_http.md b/docs/content/commands/rclone_serve_http.md index 5ad079b41..fe9eeaa13 100644 --- a/docs/content/commands/rclone_serve_http.md +++ b/docs/content/commands/rclone_serve_http.md @@ -3,6 +3,7 @@ title: "rclone serve http" description: "Serve the remote over HTTP." slug: rclone_serve_http url: /commands/rclone_serve_http/ +versionIntroduced: v1.39 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/serve/http/ and as part of making a release run "make commanddocs" --- # rclone serve http @@ -33,6 +34,12 @@ IPs. By default it only listens on localhost. You can use port If you set `--addr` to listen on a public or LAN accessible IP address then using Authentication is advised - see the next section for info. +You can use a unix socket by setting the url to `unix:///path/to/socket` +or just by using an absolute path name. Note that unix sockets bypass the +authentication - this is expected to be done with file system permissions. + +`--addr` may be repeated to listen on multiple IPs/ports/sockets. + `--server-read-timeout` and `--server-write-timeout` can be used to control the timeouts on the server. Note that this is the total time for a transfer. @@ -48,7 +55,7 @@ inserts leading and trailing "/" on `--baseurl`, so `--baseurl "rclone"`, `--baseurl "/rclone"` and `--baseurl "/rclone/"` are all treated identically. -### SSL/TLS +### TLS (SSL) By default this will serve over http. If you want you can serve over https. You will need to supply the `--cert` and `--key` flags. 
@@ -438,47 +445,47 @@ rclone serve http remote:path [flags] ## Options ``` - --addr string IPaddress:Port or :Port to bind server to (default "127.0.0.1:8080") + --addr stringArray IPaddress:Port or :Port to bind server to (default [127.0.0.1:8080]) --baseurl string Prefix for URLs - leave blank for root - --cert string SSL PEM key (concatenation of certificate and CA certificate) + --cert string TLS PEM key (concatenation of certificate and CA certificate) --client-ca string Client certificate authority to verify clients with - --dir-cache-time duration Time to cache directory entries for (default 5m0s) + --dir-cache-time Duration Time to cache directory entries for (default 5m0s) --dir-perms FileMode Directory permissions (default 0777) --file-perms FileMode File permissions (default 0666) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) -h, --help help for http --htpasswd string A htpasswd file - if not provided no authentication is done - --key string SSL PEM Private key + --key string TLS PEM Private key --max-header-bytes int Maximum size of request header (default 4096) --min-tls-version string Minimum TLS version that is acceptable (default "tls1.0") --no-checksum Don't compare checksums on up/download --no-modtime Don't read/write the modification time (can speed things up) --no-seek Don't allow seeking in files --pass string Password for authentication - --poll-interval duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) + --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --read-only Only allow read-only access --realm string Realm for authentication --salt string Password hashing salt (default "dlPL2MqE") - --server-read-timeout duration Timeout for server reading data (default 1h0m0s) - 
--server-write-timeout duration Timeout for server writing data (default 1h0m0s) + --server-read-timeout Duration Timeout for server reading data (default 1h0m0s) + --server-write-timeout Duration Timeout for server writing data (default 1h0m0s) --template string User-specified template --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) --user string User name for authentication - --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s) + --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off) - --vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s) + --vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s) --vfs-case-insensitive If a file name not found, find a case insensitive match --vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off) --vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) - --vfs-read-wait duration Time to wait for in-sequence read before seeking (default 20ms) + --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-used-is-size rclone size Use the rclone size algorithm for Used size - --vfs-write-back duration Time to writeback files after last use when using cache 
(default 5s) - --vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s) + --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s) + --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) ``` See the [global flags page](/flags/) for global options not listed here. diff --git a/docs/content/commands/rclone_serve_restic.md b/docs/content/commands/rclone_serve_restic.md index 40a4ed6d8..cbd8cd5ac 100644 --- a/docs/content/commands/rclone_serve_restic.md +++ b/docs/content/commands/rclone_serve_restic.md @@ -3,6 +3,7 @@ title: "rclone serve restic" description: "Serve the remote for restic's REST API." slug: rclone_serve_restic url: /commands/rclone_serve_restic/ +versionIntroduced: v1.40 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/serve/restic/ and as part of making a release run "make commanddocs" --- # rclone serve restic @@ -96,13 +97,19 @@ with a path of `//`. ## Server options Use `--addr` to specify which IP address and port the server should -listen on, e.g. `--addr 1.2.3.4:8000` or `--addr :8080` to -listen to all IPs. By default it only listens on localhost. You can use port +listen on, eg `--addr 1.2.3.4:8000` or `--addr :8080` to listen to all +IPs. By default it only listens on localhost. You can use port :0 to let the OS choose an available port. If you set `--addr` to listen on a public or LAN accessible IP address then using Authentication is advised - see the next section for info. +You can use a unix socket by setting the url to `unix:///path/to/socket` +or just by using an absolute path name. Note that unix sockets bypass the +authentication - this is expected to be done with file system permissions. + +`--addr` may be repeated to listen on multiple IPs/ports/sockets. + `--server-read-timeout` and `--server-write-timeout` can be used to control the timeouts on the server. 
Note that this is the total time for a transfer. @@ -118,28 +125,21 @@ inserts leading and trailing "/" on `--baseurl`, so `--baseurl "rclone"`, `--baseurl "/rclone"` and `--baseurl "/rclone/"` are all treated identically. -`--template` allows a user to specify a custom markup template for HTTP -and WebDAV serve functions. The server exports the following markup -to be used within the template to server pages: +### TLS (SSL) -| Parameter | Description | -| :---------- | :---------- | -| .Name | The full path of a file/directory. | -| .Title | Directory listing of .Name | -| .Sort | The current sort used. This is changeable via ?sort= parameter | -| | Sort Options: namedirfirst,name,size,time (default namedirfirst) | -| .Order | The current ordering used. This is changeable via ?order= parameter | -| | Order Options: asc,desc (default asc) | -| .Query | Currently unused. | -| .Breadcrumb | Allows for creating a relative navigation | -|-- .Link | The relative to the root link of the Text. | -|-- .Text | The Name of the directory. | -| .Entries | Information about a specific file/directory. | -|-- .URL | The 'url' of an entry. | -|-- .Leaf | Currently same as 'URL' but intended to be 'just' the name. | -|-- .IsDir | Boolean for if an entry is a directory or not. | -|-- .Size | Size in Bytes of the entry. | -|-- .ModTime | The UTC timestamp of an entry. | +By default this will serve over http. If you want you can serve over +https. You will need to supply the `--cert` and `--key` flags. +If you wish to do client side certificate validation then you will need to +supply `--client-ca` also. + +`--cert` should be a either a PEM encoded certificate or a concatenation +of that with the CA certificate. `--key` should be the PEM encoded +private key and `--client-ca` should be the PEM encoded client +certificate authority certificate. + +--min-tls-version is minimum TLS version that is acceptable. 
Valid + values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default + "tls1.0"). ### Authentication @@ -162,21 +162,7 @@ The password file can be updated while rclone is running. Use `--realm` to set the authentication realm. -### SSL/TLS - -By default this will serve over HTTP. If you want you can serve over -HTTPS. You will need to supply the `--cert` and `--key` flags. -If you wish to do client side certificate validation then you will need to -supply `--client-ca` also. - -`--cert` should be either a PEM encoded certificate or a concatenation -of that with the CA certificate. `--key` should be the PEM encoded -private key and `--client-ca` should be the PEM encoded client -certificate authority certificate. - ---min-tls-version is minimum TLS version that is acceptable. Valid - values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default - "tls1.0"). +Use `--salt` to change the password hashing salt from the default. ``` @@ -186,24 +172,24 @@ rclone serve restic remote:path [flags] ## Options ``` - --addr string IPaddress:Port or :Port to bind server to (default "localhost:8080") + --addr stringArray IPaddress:Port or :Port to bind server to (default [127.0.0.1:8080]) --append-only Disallow deletion of repository data --baseurl string Prefix for URLs - leave blank for root --cache-objects Cache listed objects (default true) - --cert string SSL PEM key (concatenation of certificate and CA certificate) + --cert string TLS PEM key (concatenation of certificate and CA certificate) --client-ca string Client certificate authority to verify clients with -h, --help help for restic - --htpasswd string htpasswd file - if not provided no authentication is done - --key string SSL PEM Private key + --htpasswd string A htpasswd file - if not provided no authentication is done + --key string TLS PEM Private key --max-header-bytes int Maximum size of request header (default 4096) --min-tls-version string Minimum TLS version that is acceptable (default "tls1.0") --pass 
string Password for authentication --private-repos Users can only access their private repo - --realm string Realm for authentication (default "rclone") - --server-read-timeout duration Timeout for server reading data (default 1h0m0s) - --server-write-timeout duration Timeout for server writing data (default 1h0m0s) + --realm string Realm for authentication + --salt string Password hashing salt (default "dlPL2MqE") + --server-read-timeout Duration Timeout for server reading data (default 1h0m0s) + --server-write-timeout Duration Timeout for server writing data (default 1h0m0s) --stdio Run an HTTP2 server on stdin/stdout - --template string User-specified template --user string User name for authentication ``` diff --git a/docs/content/commands/rclone_serve_sftp.md b/docs/content/commands/rclone_serve_sftp.md index 9bed264fb..1e878c058 100644 --- a/docs/content/commands/rclone_serve_sftp.md +++ b/docs/content/commands/rclone_serve_sftp.md @@ -3,6 +3,7 @@ title: "rclone serve sftp" description: "Serve the remote over SFTP." 
slug: rclone_serve_sftp url: /commands/rclone_serve_sftp/ +versionIntroduced: v1.48 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/serve/sftp/ and as part of making a release run "make commanddocs" --- # rclone serve sftp @@ -474,7 +475,7 @@ rclone serve sftp remote:path [flags] --addr string IPaddress:Port or :Port to bind server to (default "localhost:2022") --auth-proxy string A program to use to create the backend from the auth --authorized-keys string Authorized keys file (default "~/.ssh/authorized_keys") - --dir-cache-time duration Time to cache directory entries for (default 5m0s) + --dir-cache-time Duration Time to cache directory entries for (default 5m0s) --dir-perms FileMode Directory permissions (default 0777) --file-perms FileMode File permissions (default 0666) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) @@ -485,26 +486,26 @@ rclone serve sftp remote:path [flags] --no-modtime Don't read/write the modification time (can speed things up) --no-seek Don't allow seeking in files --pass string Password for authentication - --poll-interval duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) + --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --read-only Only allow read-only access --stdio Run an sftp server on stdin/stdout --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) --user string User name for authentication - --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s) + --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of 
objects in the cache (default off) --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off) - --vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s) + --vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s) --vfs-case-insensitive If a file name not found, find a case insensitive match --vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off) --vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) - --vfs-read-wait duration Time to wait for in-sequence read before seeking (default 20ms) + --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-used-is-size rclone size Use the rclone size algorithm for Used size - --vfs-write-back duration Time to writeback files after last use when using cache (default 5s) - --vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s) + --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s) + --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) ``` See the [global flags page](/flags/) for global options not listed here. diff --git a/docs/content/commands/rclone_serve_webdav.md b/docs/content/commands/rclone_serve_webdav.md index 21c2491ea..88e6973db 100644 --- a/docs/content/commands/rclone_serve_webdav.md +++ b/docs/content/commands/rclone_serve_webdav.md @@ -3,6 +3,7 @@ title: "rclone serve webdav" description: "Serve remote:path over WebDAV." 
slug: rclone_serve_webdav url: /commands/rclone_serve_webdav/ +versionIntroduced: v1.39 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/serve/webdav/ and as part of making a release run "make commanddocs" --- # rclone serve webdav @@ -31,13 +32,19 @@ to see the full list. ## Server options Use `--addr` to specify which IP address and port the server should -listen on, e.g. `--addr 1.2.3.4:8000` or `--addr :8080` to -listen to all IPs. By default it only listens on localhost. You can use port +listen on, eg `--addr 1.2.3.4:8000` or `--addr :8080` to listen to all +IPs. By default it only listens on localhost. You can use port :0 to let the OS choose an available port. If you set `--addr` to listen on a public or LAN accessible IP address then using Authentication is advised - see the next section for info. +You can use a unix socket by setting the url to `unix:///path/to/socket` +or just by using an absolute path name. Note that unix sockets bypass the +authentication - this is expected to be done with file system permissions. + +`--addr` may be repeated to listen on multiple IPs/ports/sockets. + `--server-read-timeout` and `--server-write-timeout` can be used to control the timeouts on the server. Note that this is the total time for a transfer. @@ -53,6 +60,24 @@ inserts leading and trailing "/" on `--baseurl`, so `--baseurl "rclone"`, `--baseurl "/rclone"` and `--baseurl "/rclone/"` are all treated identically. +### TLS (SSL) + +By default this will serve over http. If you want you can serve over +https. You will need to supply the `--cert` and `--key` flags. +If you wish to do client side certificate validation then you will need to +supply `--client-ca` also. + +`--cert` should be a either a PEM encoded certificate or a concatenation +of that with the CA certificate. `--key` should be the PEM encoded +private key and `--client-ca` should be the PEM encoded client +certificate authority certificate. 
+ +--min-tls-version is minimum TLS version that is acceptable. Valid + values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default + "tls1.0"). + +### Template + `--template` allows a user to specify a custom markup template for HTTP and WebDAV serve functions. The server exports the following markup to be used within the template to server pages: @@ -97,21 +122,7 @@ The password file can be updated while rclone is running. Use `--realm` to set the authentication realm. -### SSL/TLS - -By default this will serve over HTTP. If you want you can serve over -HTTPS. You will need to supply the `--cert` and `--key` flags. -If you wish to do client side certificate validation then you will need to -supply `--client-ca` also. - -`--cert` should be either a PEM encoded certificate or a concatenation -of that with the CA certificate. `--key` should be the PEM encoded -private key and `--client-ca` should be the PEM encoded client -certificate authority certificate. - ---min-tls-version is minimum TLS version that is acceptable. Valid - values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default - "tls1.0"). +Use `--salt` to change the password hashing salt from the default. 
## VFS - Virtual File System @@ -520,49 +531,50 @@ rclone serve webdav remote:path [flags] ## Options ``` - --addr string IPaddress:Port or :Port to bind server to (default "localhost:8080") + --addr stringArray IPaddress:Port or :Port to bind server to (default [127.0.0.1:8080]) --auth-proxy string A program to use to create the backend from the auth --baseurl string Prefix for URLs - leave blank for root - --cert string SSL PEM key (concatenation of certificate and CA certificate) + --cert string TLS PEM key (concatenation of certificate and CA certificate) --client-ca string Client certificate authority to verify clients with - --dir-cache-time duration Time to cache directory entries for (default 5m0s) + --dir-cache-time Duration Time to cache directory entries for (default 5m0s) --dir-perms FileMode Directory permissions (default 0777) --disable-dir-list Disable HTML directory list on GET request for a directory --etag-hash string Which hash to use for the ETag, or auto or blank for off --file-perms FileMode File permissions (default 0666) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) -h, --help help for webdav - --htpasswd string htpasswd file - if not provided no authentication is done - --key string SSL PEM Private key + --htpasswd string A htpasswd file - if not provided no authentication is done + --key string TLS PEM Private key --max-header-bytes int Maximum size of request header (default 4096) --min-tls-version string Minimum TLS version that is acceptable (default "tls1.0") --no-checksum Don't compare checksums on up/download --no-modtime Don't read/write the modification time (can speed things up) --no-seek Don't allow seeking in files --pass string Password for authentication - --poll-interval duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) + --poll-interval Duration Time to wait between 
polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --read-only Only allow read-only access - --realm string Realm for authentication (default "rclone") - --server-read-timeout duration Timeout for server reading data (default 1h0m0s) - --server-write-timeout duration Timeout for server writing data (default 1h0m0s) + --realm string Realm for authentication + --salt string Password hashing salt (default "dlPL2MqE") + --server-read-timeout Duration Timeout for server reading data (default 1h0m0s) + --server-write-timeout Duration Timeout for server writing data (default 1h0m0s) --template string User-specified template --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) --user string User name for authentication - --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s) + --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off) - --vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s) + --vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s) --vfs-case-insensitive If a file name not found, find a case insensitive match --vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off) --vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, 
until the limit is reached ('off' is unlimited) (default off) - --vfs-read-wait duration Time to wait for in-sequence read before seeking (default 20ms) + --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-used-is-size rclone size Use the rclone size algorithm for Used size - --vfs-write-back duration Time to writeback files after last use when using cache (default 5s) - --vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s) + --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s) + --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) ``` See the [global flags page](/flags/) for global options not listed here. diff --git a/docs/content/commands/rclone_settier.md b/docs/content/commands/rclone_settier.md index 014a4fce1..b69f9f302 100644 --- a/docs/content/commands/rclone_settier.md +++ b/docs/content/commands/rclone_settier.md @@ -3,6 +3,7 @@ title: "rclone settier" description: "Changes storage class/tier of objects in remote." slug: rclone_settier url: /commands/rclone_settier/ +versionIntroduced: v1.44 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/settier/ and as part of making a release run "make commanddocs" --- # rclone settier diff --git a/docs/content/commands/rclone_sha1sum.md b/docs/content/commands/rclone_sha1sum.md index 49a09ec29..3d02f6d41 100644 --- a/docs/content/commands/rclone_sha1sum.md +++ b/docs/content/commands/rclone_sha1sum.md @@ -3,6 +3,7 @@ title: "rclone sha1sum" description: "Produces an sha1sum file for all the objects in the path." 
slug: rclone_sha1sum url: /commands/rclone_sha1sum/ +versionIntroduced: v1.27 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/sha1sum/ and as part of making a release run "make commanddocs" --- # rclone sha1sum diff --git a/docs/content/commands/rclone_size.md b/docs/content/commands/rclone_size.md index 7f75fe981..ad58f6f11 100644 --- a/docs/content/commands/rclone_size.md +++ b/docs/content/commands/rclone_size.md @@ -3,6 +3,7 @@ title: "rclone size" description: "Prints the total size and number of objects in remote:path." slug: rclone_size url: /commands/rclone_size/ +versionIntroduced: v1.23 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/size/ and as part of making a release run "make commanddocs" --- # rclone size diff --git a/docs/content/commands/rclone_test.md b/docs/content/commands/rclone_test.md index d04ccb803..4eed2b262 100644 --- a/docs/content/commands/rclone_test.md +++ b/docs/content/commands/rclone_test.md @@ -3,6 +3,7 @@ title: "rclone test" description: "Run a test command" slug: rclone_test url: /commands/rclone_test/ +versionIntroduced: v1.55 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/test/ and as part of making a release run "make commanddocs" --- # rclone test diff --git a/docs/content/commands/rclone_test_changenotify.md b/docs/content/commands/rclone_test_changenotify.md index e5f1024de..1f87bc682 100644 --- a/docs/content/commands/rclone_test_changenotify.md +++ b/docs/content/commands/rclone_test_changenotify.md @@ -3,6 +3,7 @@ title: "rclone test changenotify" description: "Log any change notify requests for the remote passed in." 
slug: rclone_test_changenotify url: /commands/rclone_test_changenotify/ +versionIntroduced: v1.56 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/test/changenotify/ and as part of making a release run "make commanddocs" --- # rclone test changenotify @@ -17,7 +18,7 @@ rclone test changenotify remote: [flags] ``` -h, --help help for changenotify - --poll-interval duration Time to wait between polling for changes (default 10s) + --poll-interval Duration Time to wait between polling for changes (default 10s) ``` See the [global flags page](/flags/) for global options not listed here. diff --git a/docs/content/commands/rclone_test_histogram.md b/docs/content/commands/rclone_test_histogram.md index b87ac83a0..493007d99 100644 --- a/docs/content/commands/rclone_test_histogram.md +++ b/docs/content/commands/rclone_test_histogram.md @@ -3,6 +3,7 @@ title: "rclone test histogram" description: "Makes a histogram of file name characters." slug: rclone_test_histogram url: /commands/rclone_test_histogram/ +versionIntroduced: v1.55 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/test/histogram/ and as part of making a release run "make commanddocs" --- # rclone test histogram diff --git a/docs/content/commands/rclone_test_info.md b/docs/content/commands/rclone_test_info.md index d8791d7c1..1a16e6242 100644 --- a/docs/content/commands/rclone_test_info.md +++ b/docs/content/commands/rclone_test_info.md @@ -3,6 +3,7 @@ title: "rclone test info" description: "Discovers file name or other limitations for paths." 
slug: rclone_test_info url: /commands/rclone_test_info/ +versionIntroduced: v1.55 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/test/info/ and as part of making a release run "make commanddocs" --- # rclone test info @@ -32,7 +33,7 @@ rclone test info [remote:path]+ [flags] --check-normalization Check UTF-8 Normalization --check-streaming Check uploads with indeterminate file size -h, --help help for info - --upload-wait duration Wait after writing a file + --upload-wait Duration Wait after writing a file (default 0s) --write-json string Write results to file ``` diff --git a/docs/content/commands/rclone_test_makefile.md b/docs/content/commands/rclone_test_makefile.md index 5acddb5c1..4eb0977e3 100644 --- a/docs/content/commands/rclone_test_makefile.md +++ b/docs/content/commands/rclone_test_makefile.md @@ -3,6 +3,7 @@ title: "rclone test makefile" description: "Make files with random contents of the size given" slug: rclone_test_makefile url: /commands/rclone_test_makefile/ +versionIntroduced: v1.59 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/test/makefile/ and as part of making a release run "make commanddocs" --- # rclone test makefile diff --git a/docs/content/commands/rclone_test_makefiles.md b/docs/content/commands/rclone_test_makefiles.md index ad8e3f14b..4211bed96 100644 --- a/docs/content/commands/rclone_test_makefiles.md +++ b/docs/content/commands/rclone_test_makefiles.md @@ -3,6 +3,7 @@ title: "rclone test makefiles" description: "Make a random file hierarchy in a directory" slug: rclone_test_makefiles url: /commands/rclone_test_makefiles/ +versionIntroduced: v1.55 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/test/makefiles/ and as part of making a release run "make commanddocs" --- # rclone test makefiles @@ -21,6 +22,7 @@ rclone test makefiles [flags] --files int Number of files to create (default 1000) --files-per-directory int Average number of files per directory (default 10) -h, 
--help help for makefiles + --max-depth int Maximum depth of directory hierarchy (default 10) --max-file-size SizeSuffix Maximum size of files to create (default 100) --max-name-length int Maximum size of file names (default 12) --min-file-size SizeSuffix Minimum size of file to create diff --git a/docs/content/commands/rclone_test_memory.md b/docs/content/commands/rclone_test_memory.md index 104e9f24d..5d527fe20 100644 --- a/docs/content/commands/rclone_test_memory.md +++ b/docs/content/commands/rclone_test_memory.md @@ -3,6 +3,7 @@ title: "rclone test memory" description: "Load all the objects at remote:path into memory and report memory stats." slug: rclone_test_memory url: /commands/rclone_test_memory/ +versionIntroduced: v1.55 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/test/memory/ and as part of making a release run "make commanddocs" --- # rclone test memory diff --git a/docs/content/commands/rclone_touch.md b/docs/content/commands/rclone_touch.md index 556a15fec..0882cc34a 100644 --- a/docs/content/commands/rclone_touch.md +++ b/docs/content/commands/rclone_touch.md @@ -3,6 +3,7 @@ title: "rclone touch" description: "Create new file or change file modification time." slug: rclone_touch url: /commands/rclone_touch/ +versionIntroduced: v1.39 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/touch/ and as part of making a release run "make commanddocs" --- # rclone touch diff --git a/docs/content/commands/rclone_tree.md b/docs/content/commands/rclone_tree.md index 3ecc450e5..277f05e80 100644 --- a/docs/content/commands/rclone_tree.md +++ b/docs/content/commands/rclone_tree.md @@ -3,6 +3,7 @@ title: "rclone tree" description: "List the contents of the remote in a tree like fashion." 
slug: rclone_tree url: /commands/rclone_tree/ +versionIntroduced: v1.38 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/tree/ and as part of making a release run "make commanddocs" --- # rclone tree diff --git a/docs/content/commands/rclone_version.md b/docs/content/commands/rclone_version.md index 3f16a70ff..c2d424ee3 100644 --- a/docs/content/commands/rclone_version.md +++ b/docs/content/commands/rclone_version.md @@ -3,6 +3,7 @@ title: "rclone version" description: "Show the version number." slug: rclone_version url: /commands/rclone_version/ +versionIntroduced: v1.33 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/version/ and as part of making a release run "make commanddocs" --- # rclone version diff --git a/docs/content/flags.md b/docs/content/flags.md index c94a9e904..48771a909 100644 --- a/docs/content/flags.md +++ b/docs/content/flags.md @@ -27,10 +27,10 @@ These flags are available for every command. -c, --checksum Skip based on checksum (if available) & size, not mod-time & size --client-cert string Client SSL certificate (PEM) for mutual TLS auth --client-key string Client SSL private key (PEM) for mutual TLS auth - --color Define when colors (and other ANSI codes) should be shown AUTO|ALWAYS|NEVER (default AUTO) + --color string When to show colors (and other ANSI codes) AUTO|NEVER|ALWAYS (default "AUTO") --compare-dest stringArray Include additional comma separated server-side paths during comparison --config string Config file (default "$HOME/.config/rclone/rclone.conf") - --contimeout duration Connect timeout (default 1m0s) + --contimeout Duration Connect timeout (default 1m0s) --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination --cpuprofile string Write cpu profile to file --cutoff-mode string Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default "HARD") @@ -48,16 +48,16 @@ These flags are available for every command. 
--dump-headers Dump HTTP headers - may contain sensitive info --error-on-no-transfer Sets exit code 9 if no files are transferred, useful in scripts --exclude stringArray Exclude files matching pattern - --exclude-from stringArray Read exclude patterns from file (use - to read from stdin) + --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin) --exclude-if-present stringArray Exclude directories if filename is present - --expect-continue-timeout duration Timeout when using expect / 100-continue in HTTP (default 1s) + --expect-continue-timeout Duration Timeout when using expect / 100-continue in HTTP (default 1s) --fast-list Use recursive list if available; uses more memory but fewer transactions --files-from stringArray Read list of source-file names from file (use - to read from stdin) --files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin) - -f, --filter stringArray Add a file-filtering rule - --filter-from stringArray Read filtering patterns from a file (use - to read from stdin) - --fs-cache-expire-duration duration Cache remotes for this long (0 to disable caching) (default 5m0s) - --fs-cache-expire-interval duration Interval to check for expired remotes (default 1m0s) + -f, --filter stringArray Add a file filtering rule + --filter-from stringArray Read file filtering patterns from a file (use - to read from stdin) + --fs-cache-expire-duration Duration Cache remotes for this long (0 to disable caching) (default 5m0s) + --fs-cache-expire-interval Duration Interval to check for expired remotes (default 1m0s) --header stringArray Set HTTP header for all transactions --header-download stringArray Set HTTP header for download transactions --header-upload stringArray Set HTTP header for upload transactions @@ -71,9 +71,9 @@ These flags are available for every command. 
-I, --ignore-times Don't skip files that match size and time - transfer all files --immutable Do not modify files, fail if existing files have been modified --include stringArray Include files matching pattern - --include-from stringArray Read include patterns from file (use - to read from stdin) + --include-from stringArray Read file include patterns from file (use - to read from stdin) -i, --interactive Enable interactive mode - --kv-lock-time duration Maximum time to keep key-value database locked by process (default 1s) + --kv-lock-time Duration Maximum time to keep key-value database locked by process (default 1s) --log-file string Log everything to this file --log-format string Comma separated list of log format options (default "date,time") --log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE") @@ -83,16 +83,22 @@ These flags are available for every command. --max-backlog int Maximum number of objects in sync or check backlog (default 10000) --max-delete int When synchronizing, limit the number of deletes (default -1) --max-depth int If set limits the recursion depth to this (default -1) - --max-duration duration Maximum duration rclone will transfer data for + --max-duration Duration Maximum duration rclone will transfer data for (default 0s) --max-size SizeSuffix Only transfer files smaller than this in KiB or suffix B|K|M|G|T|P (default off) --max-stats-groups int Maximum number of stats groups to keep in memory, on max oldest is discarded (default 1000) --max-transfer SizeSuffix Maximum size of data to transfer (default off) --memprofile string Write memory profile to file -M, --metadata If set, preserve metadata when copying objects + --metadata-exclude stringArray Exclude metadatas matching pattern + --metadata-exclude-from stringArray Read metadata exclude patterns from file (use - to read from stdin) + --metadata-filter stringArray Add a metadata filtering rule + --metadata-filter-from stringArray Read metadata filtering patterns 
from a file (use - to read from stdin) + --metadata-include stringArray Include metadatas matching pattern + --metadata-include-from stringArray Read metadata include patterns from file (use - to read from stdin) --metadata-set stringArray Add metadata key=value when uploading --min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off) --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) - --modify-window duration Max time diff to be considered the same (default 1ns) + --modify-window Duration Max time diff to be considered the same (default 1ns) --multi-thread-cutoff SizeSuffix Use multi-thread downloads for files above this size (default 250Mi) --multi-thread-streams int Max number of streams to use for multi-thread downloads (default 4) --no-check-certificate Do not verify the server SSL certificate (insecure) @@ -108,25 +114,26 @@ These flags are available for every command. --progress-terminal-title Show progress on the terminal title (requires -P/--progress) -q, --quiet Print as little stuff as possible --rc Enable the remote control server - --rc-addr string IPaddress:Port or :Port to bind server to (default "localhost:5572") + --rc-addr stringArray IPaddress:Port or :Port to bind server to (default [localhost:5572]) --rc-allow-origin string Set the allowed origin for CORS --rc-baseurl string Prefix for URLs - leave blank for root - --rc-cert string SSL PEM key (concatenation of certificate and CA certificate) + --rc-cert string TLS PEM key (concatenation of certificate and CA certificate) --rc-client-ca string Client certificate authority to verify clients with --rc-enable-metrics Enable prometheus metrics on /metrics --rc-files string Path to local files to serve on the HTTP server - --rc-htpasswd string htpasswd file - if not provided no authentication is done - --rc-job-expire-duration duration Expire finished async jobs older than this value (default 1m0s) - 
--rc-job-expire-interval duration Interval to check for expired async jobs (default 10s) - --rc-key string SSL PEM Private key + --rc-htpasswd string A htpasswd file - if not provided no authentication is done + --rc-job-expire-duration Duration Expire finished async jobs older than this value (default 1m0s) + --rc-job-expire-interval Duration Interval to check for expired async jobs (default 10s) + --rc-key string TLS PEM Private key --rc-max-header-bytes int Maximum size of request header (default 4096) --rc-min-tls-version string Minimum TLS version that is acceptable (default "tls1.0") --rc-no-auth Don't require auth for certain methods --rc-pass string Password for authentication - --rc-realm string Realm for authentication (default "rclone") + --rc-realm string Realm for authentication + --rc-salt string Password hashing salt (default "dlPL2MqE") --rc-serve Enable the serving of remote objects - --rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s) - --rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s) + --rc-server-read-timeout Duration Timeout for server reading data (default 1h0m0s) + --rc-server-write-timeout Duration Timeout for server writing data (default 1h0m0s) --rc-template string User-specified template --rc-user string User name for authentication --rc-web-fetch-url string URL to fetch the releases for webgui (default "https://api.github.com/repos/rclone/rclone-webui-react/releases/latest") @@ -136,10 +143,10 @@ These flags are available for every command. --rc-web-gui-update Check and update to latest version of web gui --refresh-times Refresh the modtime of remote files --retries int Retry operations this many times if they fail (default 3) - --retries-sleep duration Interval between retrying operations if they fail, e.g. 500ms, 60s, 5m (0 to disable) + --retries-sleep Duration Interval between retrying operations if they fail, e.g. 
500ms, 60s, 5m (0 to disable) (default 0s) --server-side-across-configs Allow server-side operations (e.g. copy) to work across different configs --size-only Skip based on size only, not mod-time or checksum - --stats duration Interval between printing stats, e.g. 500ms, 60s, 5m (0 to disable) (default 1m0s) + --stats Duration Interval between printing stats, e.g. 500ms, 60s, 5m (0 to disable) (default 1m0s) --stats-file-name-length int Max file name length in stats (0 for no limit) (default 45) --stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO") --stats-one-line Make the stats fit on one line @@ -152,7 +159,7 @@ These flags are available for every command. --syslog Use Syslog for logging --syslog-facility string Facility for syslog, e.g. KERN,USER,... (default "DAEMON") --temp-dir string Directory rclone will use for temporary files (default "/tmp") - --timeout duration IO idle timeout (default 5m0s) + --timeout Duration IO idle timeout (default 5m0s) --tpslimit float Limit HTTP transactions per second to this --tpslimit-burst int Max burst of transactions for --tpslimit (default 1) --track-renames When synchronizing, track file renames and do a server-side move if possible @@ -163,7 +170,7 @@ These flags are available for every command. --use-json-log Use json log format --use-mmap Use mmap allocator (see docs) --use-server-modtime Use server modified time instead of object metadata - --user-agent string Set the user-agent to a specified string (default "rclone/v1.60.0") + --user-agent string Set the user-agent to a specified string (default "rclone/v1.61.0") -v, --verbose count Print lots more stuff (repeat for more) ``` @@ -173,527 +180,541 @@ These flags are available for every command. They control the backends and may be set in the config file. 
``` - --acd-auth-url string Auth server URL - --acd-client-id string OAuth Client Id - --acd-client-secret string OAuth Client Secret - --acd-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot) - --acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink (default 9Gi) - --acd-token string OAuth Access Token as a JSON blob - --acd-token-url string Token server url - --acd-upload-wait-per-gb Duration Additional time per GiB to wait after a failed complete upload to see if it appears (default 3m0s) - --alias-remote string Remote or path to alias - --azureblob-access-tier string Access tier of blob: hot, cool or archive - --azureblob-account string Storage Account Name - --azureblob-archive-tier-delete Delete archive tier blobs before overwriting - --azureblob-chunk-size SizeSuffix Upload chunk size (default 4Mi) - --azureblob-disable-checksum Don't store MD5 checksum with object metadata - --azureblob-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8) - --azureblob-endpoint string Endpoint for the service - --azureblob-key string Storage Account Key - --azureblob-list-chunk int Size of blob list (default 5000) - --azureblob-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s) - --azureblob-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool - --azureblob-msi-client-id string Object ID of the user-assigned MSI to use, if any - --azureblob-msi-mi-res-id string Azure resource ID of the user-assigned MSI to use, if any - --azureblob-msi-object-id string Object ID of the user-assigned MSI to use, if any - --azureblob-no-head-object If set, do not do HEAD before GET when getting objects - --azureblob-public-access string Public access level of a container: blob or container - --azureblob-sas-url string SAS URL for container level access only - --azureblob-service-principal-file string 
Path to file containing credentials for use with a service principal - --azureblob-upload-concurrency int Concurrency for multipart uploads (default 16) - --azureblob-upload-cutoff string Cutoff for switching to chunked upload (<= 256 MiB) (deprecated) - --azureblob-use-emulator Uses local storage emulator if provided as 'true' - --azureblob-use-msi Use a managed service identity to authenticate (only works in Azure) - --b2-account string Account ID or Application Key ID - --b2-chunk-size SizeSuffix Upload chunk size (default 96Mi) - --b2-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4Gi) - --b2-disable-checksum Disable checksums for large (> upload cutoff) files - --b2-download-auth-duration Duration Time before the authorization token will expire in s or suffix ms|s|m|h|d (default 1w) - --b2-download-url string Custom endpoint for downloads - --b2-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot) - --b2-endpoint string Endpoint for the service - --b2-hard-delete Permanently delete files on remote removal, otherwise hide files - --b2-key string Application Key - --b2-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s) - --b2-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool - --b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging - --b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi) - --b2-version-at Time Show file versions as they were at the specified time (default off) - --b2-versions Include old versions in directory listings - --box-access-token string Box App Primary Access Token - --box-auth-url string Auth server URL - --box-box-config-file string Box App config.json location - --box-box-sub-type string (default "user") - --box-client-id string OAuth Client Id - --box-client-secret string OAuth Client Secret - --box-commit-retries int Max number of times to 
try committing a multipart file (default 100) - --box-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot) - --box-list-chunk int Size of listing chunk 1-1000 (default 1000) - --box-owned-by string Only show items owned by the login (email address) passed in - --box-root-folder-id string Fill in for rclone to use a non root folder as its starting point - --box-token string OAuth Access Token as a JSON blob - --box-token-url string Token server url - --box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50 MiB) (default 50Mi) - --cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage (default 1m0s) - --cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming - --cache-chunk-path string Directory to cache chunk files (default "$HOME/.cache/rclone/cache-backend") - --cache-chunk-size SizeSuffix The size of a chunk (partial file data) (default 5Mi) - --cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk (default 10Gi) - --cache-db-path string Directory to store file structure metadata DB (default "$HOME/.cache/rclone/cache-backend") - --cache-db-purge Clear all the cached data for this remote on start - --cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s) - --cache-info-age Duration How long to cache file structure information (directory listings, file size, times, etc.) 
(default 6h0m0s) - --cache-plex-insecure string Skip all certificate verification when connecting to the Plex server - --cache-plex-password string The password of the Plex user (obscured) - --cache-plex-url string The URL of the Plex server - --cache-plex-username string The username of the Plex user - --cache-read-retries int How many times to retry a read from a cache storage (default 10) - --cache-remote string Remote to cache - --cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1) - --cache-tmp-upload-path string Directory to keep temporary files until they are uploaded - --cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s) - --cache-workers int How many workers should run in parallel to download chunks (default 4) - --cache-writes Cache file data on writes through the FS - --chunker-chunk-size SizeSuffix Files larger than chunk size will be split in chunks (default 2Gi) - --chunker-fail-hard Choose how chunker should handle files with missing or invalid chunks - --chunker-hash-type string Choose how chunker handles hash sums (default "md5") - --chunker-remote string Remote to chunk/unchunk - --combine-upstreams SpaceSepList Upstreams for combining - --compress-level int GZIP compression level (-2 to 9) (default -1) - --compress-mode string Compression mode (default "gzip") - --compress-ram-cache-limit SizeSuffix Some remotes don't allow the upload of files with unknown size (default 20Mi) - --compress-remote string Remote to compress - -L, --copy-links Follow symlinks and copy the pointed to item - --crypt-directory-name-encryption Option to either encrypt directory names or leave them intact (default true) - --crypt-filename-encoding string How to encode the encrypted filename to text string (default "base32") - --crypt-filename-encryption string How to encrypt the filenames (default "standard") - --crypt-no-data-encryption Option to either encrypt 
file data or leave it unencrypted - --crypt-password string Password or pass phrase for encryption (obscured) - --crypt-password2 string Password or pass phrase for salt (obscured) - --crypt-remote string Remote to encrypt/decrypt - --crypt-server-side-across-configs Allow server-side operations (e.g. copy) to work across different crypt configs - --crypt-show-mapping For all files listed show how the names encrypt - --drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded - --drive-allow-import-name-change Allow the filetype to change when uploading Google docs - --drive-auth-owner-only Only consider files owned by the authenticated user - --drive-auth-url string Auth server URL - --drive-chunk-size SizeSuffix Upload chunk size (default 8Mi) - --drive-client-id string Google Application Client Id - --drive-client-secret string OAuth Client Secret - --drive-copy-shortcut-content Server side copy contents of shortcuts instead of the shortcut - --drive-disable-http2 Disable drive using http2 (default true) - --drive-encoding MultiEncoder The encoding for the backend (default InvalidUtf8) - --drive-export-formats string Comma separated list of preferred formats for downloading Google docs (default "docx,xlsx,pptx,svg") - --drive-formats string Deprecated: See export_formats - --drive-impersonate string Impersonate this user when using a service account - --drive-import-formats string Comma separated list of preferred formats for uploading Google docs - --drive-keep-revision-forever Keep new head revision of each file forever - --drive-list-chunk int Size of listing chunk 100-1000, 0 to disable (default 1000) - --drive-pacer-burst int Number of API calls to allow without sleeping (default 100) - --drive-pacer-min-sleep Duration Minimum time to sleep between API calls (default 100ms) - --drive-resource-key string Resource key for accessing a link-shared file - --drive-root-folder-id string ID of the root folder - --drive-scope 
string Scope that rclone should use when requesting access from drive - --drive-server-side-across-configs Allow server-side operations (e.g. copy) to work across different drive configs - --drive-service-account-credentials string Service Account Credentials JSON blob - --drive-service-account-file string Service Account Credentials JSON file path - --drive-shared-with-me Only show files that are shared with me - --drive-size-as-quota Show sizes as storage quota usage, not actual size - --drive-skip-checksum-gphotos Skip MD5 checksum on Google photos and videos only - --drive-skip-dangling-shortcuts If set skip dangling shortcut files - --drive-skip-gdocs Skip google documents in all listings - --drive-skip-shortcuts If set skip shortcut files - --drive-starred-only Only show files that are starred - --drive-stop-on-download-limit Make download limit errors be fatal - --drive-stop-on-upload-limit Make upload limit errors be fatal - --drive-team-drive string ID of the Shared Drive (Team Drive) - --drive-token string OAuth Access Token as a JSON blob - --drive-token-url string Token server url - --drive-trashed-only Only show files that are in the trash - --drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8Mi) - --drive-use-created-date Use file created date instead of modified date - --drive-use-shared-date Use date file was shared instead of modified date - --drive-use-trash Send files to the trash instead of deleting permanently (default true) - --drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download (default off) - --dropbox-auth-url string Auth server URL - --dropbox-batch-commit-timeout Duration Max time to wait for a batch to finish committing (default 10m0s) - --dropbox-batch-mode string Upload file batching sync|async|off (default "sync") - --dropbox-batch-size int Max number of files in upload batch - --dropbox-batch-timeout Duration Max time to allow an idle upload batch before uploading 
(default 0s) - --dropbox-chunk-size SizeSuffix Upload chunk size (< 150Mi) (default 48Mi) - --dropbox-client-id string OAuth Client Id - --dropbox-client-secret string OAuth Client Secret - --dropbox-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot) - --dropbox-impersonate string Impersonate this user when using a business account - --dropbox-shared-files Instructs rclone to work on individual shared files - --dropbox-shared-folders Instructs rclone to work on shared folders - --dropbox-token string OAuth Access Token as a JSON blob - --dropbox-token-url string Token server url - --fichier-api-key string Your API Key, get it from https://1fichier.com/console/params.pl - --fichier-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot) - --fichier-file-password string If you want to download a shared file that is password protected, add this parameter (obscured) - --fichier-folder-password string If you want to list the files in a shared folder that is password protected, add this parameter (obscured) - --fichier-shared-folder string If you want to download a shared folder, add this parameter - --filefabric-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot) - --filefabric-permanent-token string Permanent Authentication Token - --filefabric-root-folder-id string ID of the root folder - --filefabric-token string Session Token - --filefabric-token-expiry string Token expiry time - --filefabric-url string URL of the Enterprise File Fabric to connect to - --filefabric-version string Version read from the file fabric - --ftp-ask-password Allow asking for FTP password when needed - --ftp-close-timeout Duration Maximum time to wait for a response to close (default 1m0s) - --ftp-concurrency int Maximum number of FTP simultaneous connections, 0 for unlimited - 
--ftp-disable-epsv Disable using EPSV even if server advertises support - --ftp-disable-mlsd Disable using MLSD even if server advertises support - --ftp-disable-tls13 Disable TLS 1.3 (workaround for FTP servers with buggy TLS) - --ftp-disable-utf8 Disable using UTF-8 even if server advertises support - --ftp-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,RightSpace,Dot) - --ftp-explicit-tls Use Explicit FTPS (FTP over TLS) - --ftp-force-list-hidden Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD - --ftp-host string FTP host to connect to - --ftp-idle-timeout Duration Max time before closing idle connections (default 1m0s) - --ftp-no-check-certificate Do not verify the TLS certificate of the server - --ftp-pass string FTP password (obscured) - --ftp-port int FTP port number (default 21) - --ftp-shut-timeout Duration Maximum time to wait for data connection closing status (default 1m0s) - --ftp-tls Use Implicit FTPS (FTP over TLS) - --ftp-tls-cache-size int Size of TLS session cache for all control and data connections (default 32) - --ftp-user string FTP username (default "$USER") - --ftp-writing-mdtm Use MDTM to set modification time (VsFtpd quirk) - --gcs-anonymous Access public buckets and objects without credentials - --gcs-auth-url string Auth server URL - --gcs-bucket-acl string Access Control List for new buckets - --gcs-bucket-policy-only Access checks should use bucket-level IAM policies - --gcs-client-id string OAuth Client Id - --gcs-client-secret string OAuth Client Secret - --gcs-decompress If set this will decompress gzip encoded objects - --gcs-encoding MultiEncoder The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot) - --gcs-endpoint string Endpoint for the service - --gcs-location string Location for the newly created buckets - --gcs-no-check-bucket If set, don't attempt to check the bucket exists or create it - --gcs-object-acl string Access Control List for new 
objects - --gcs-project-number string Project number - --gcs-service-account-file string Service Account Credentials JSON file path - --gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage - --gcs-token string OAuth Access Token as a JSON blob - --gcs-token-url string Token server url - --gphotos-auth-url string Auth server URL - --gphotos-client-id string OAuth Client Id - --gphotos-client-secret string OAuth Client Secret - --gphotos-encoding MultiEncoder The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot) - --gphotos-include-archived Also view and download archived media - --gphotos-read-only Set to make the Google Photos backend read only - --gphotos-read-size Set to read the size of media items - --gphotos-start-year int Year limits the photos to be downloaded to those which are uploaded after the given year (default 2000) - --gphotos-token string OAuth Access Token as a JSON blob - --gphotos-token-url string Token server url - --hasher-auto-size SizeSuffix Auto-update checksum for files smaller than this size (disabled by default) - --hasher-hashes CommaSepList Comma separated list of supported checksum types (default md5,sha1) - --hasher-max-age Duration Maximum time to keep checksums in cache (0 = no cache, off = cache forever) (default off) - --hasher-remote string Remote to cache checksums for (e.g. 
myRemote:path) - --hdfs-data-transfer-protection string Kerberos data transfer protection: authentication|integrity|privacy - --hdfs-encoding MultiEncoder The encoding for the backend (default Slash,Colon,Del,Ctl,InvalidUtf8,Dot) - --hdfs-namenode string Hadoop name node and port - --hdfs-service-principal-name string Kerberos service principal name for the namenode - --hdfs-username string Hadoop user name - --hidrive-auth-url string Auth server URL - --hidrive-chunk-size SizeSuffix Chunksize for chunked uploads (default 48Mi) - --hidrive-client-id string OAuth Client Id - --hidrive-client-secret string OAuth Client Secret - --hidrive-disable-fetching-member-count Do not fetch number of objects in directories unless it is absolutely necessary - --hidrive-encoding MultiEncoder The encoding for the backend (default Slash,Dot) - --hidrive-endpoint string Endpoint for the service (default "https://api.hidrive.strato.com/2.1") - --hidrive-root-prefix string The root/parent folder for all paths (default "/") - --hidrive-scope-access string Access permissions that rclone should use when requesting access from HiDrive (default "rw") - --hidrive-scope-role string User-level that rclone should use when requesting access from HiDrive (default "user") - --hidrive-token string OAuth Access Token as a JSON blob - --hidrive-token-url string Token server url - --hidrive-upload-concurrency int Concurrency for chunked uploads (default 4) - --hidrive-upload-cutoff SizeSuffix Cutoff/Threshold for chunked uploads (default 96Mi) - --http-headers CommaSepList Set HTTP headers for all transactions - --http-no-head Don't use HEAD requests - --http-no-slash Set this if the site doesn't end directories with / - --http-url string URL of HTTP host to connect to - --internetarchive-access-key-id string IAS3 Access Key - --internetarchive-disable-checksum Don't ask the server to test against MD5 checksum calculated by rclone (default true) - --internetarchive-encoding MultiEncoder The encoding 
for the backend (default Slash,LtGt,CrLf,Del,Ctl,InvalidUtf8,Dot) - --internetarchive-endpoint string IAS3 Endpoint (default "https://s3.us.archive.org") - --internetarchive-front-endpoint string Host of InternetArchive Frontend (default "https://archive.org") - --internetarchive-secret-access-key string IAS3 Secret Key (password) - --internetarchive-wait-archive Duration Timeout for waiting the server's processing tasks (specifically archive and book_op) to finish (default 0s) - --jottacloud-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot) - --jottacloud-hard-delete Delete files permanently rather than putting them into the trash - --jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required (default 10Mi) - --jottacloud-no-versions Avoid server side versioning by deleting files and recreating files instead of overwriting them - --jottacloud-trashed-only Only show files that are in the trash - --jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fail's (default 10Mi) - --koofr-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot) - --koofr-endpoint string The Koofr API endpoint to use - --koofr-mountid string Mount ID of the mount to use - --koofr-password string Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password) (obscured) - --koofr-provider string Choose your storage provider - --koofr-setmtime Does the backend support setting modification time (default true) - --koofr-user string Your user name - -l, --links Translate symlinks to/from regular files with a '.rclonelink' extension - --local-case-insensitive Force the filesystem to report itself as case insensitive - --local-case-sensitive Force the filesystem to report itself as case sensitive - --local-encoding MultiEncoder The 
encoding for the backend (default Slash,Dot) - --local-no-check-updated Don't check to see if the files change during upload - --local-no-preallocate Disable preallocation of disk space for transferred files - --local-no-set-modtime Disable setting modtime - --local-no-sparse Disable sparse files for multi-thread downloads - --local-nounc Disable UNC (long path names) conversion on Windows - --local-unicode-normalization Apply unicode NFC normalization to paths and filenames - --local-zero-size-links Assume the Stat size of links is zero (and read them instead) (deprecated) - --mailru-check-hash What should copy do if file checksum is mismatched or invalid (default true) - --mailru-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot) - --mailru-pass string Password (obscured) - --mailru-speedup-enable Skip full upload if there is another file with same data hash (default true) - --mailru-speedup-file-patterns string Comma separated list of file name patterns eligible for speedup (put by hash) (default "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf") - --mailru-speedup-max-disk SizeSuffix This option allows you to disable speedup (put by hash) for large files (default 3Gi) - --mailru-speedup-max-memory SizeSuffix Files larger than the size given below will always be hashed on disk (default 32Mi) - --mailru-user string User name (usually email) - --mega-debug Output more debug from Mega - --mega-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot) - --mega-hard-delete Delete files permanently rather than putting them into the trash - --mega-pass string Password (obscured) - --mega-user string User name - --netstorage-account string Set the NetStorage account name - --netstorage-host string Domain+path of NetStorage host to connect to - --netstorage-protocol string Select between HTTP or HTTPS protocol (default "https") - --netstorage-secret 
string Set the NetStorage account secret/G2O key for authentication (obscured) - -x, --one-file-system Don't cross filesystem boundaries (unix/macOS only) - --onedrive-access-scopes SpaceSepList Set scopes to be requested by rclone (default Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access) - --onedrive-auth-url string Auth server URL - --onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k (327,680 bytes) (default 10Mi) - --onedrive-client-id string OAuth Client Id - --onedrive-client-secret string OAuth Client Secret - --onedrive-drive-id string The ID of the drive to use - --onedrive-drive-type string The type of the drive (personal | business | documentLibrary) - --onedrive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot) - --onedrive-expose-onenote-files Set to make OneNote files show up in directory listings - --onedrive-link-password string Set the password for links created by the link command - --onedrive-link-scope string Set the scope of the links created by the link command (default "anonymous") - --onedrive-link-type string Set the type of the links created by the link command (default "view") - --onedrive-list-chunk int Size of listing chunk (default 1000) - --onedrive-no-versions Remove all versions on modifying operations - --onedrive-region string Choose national cloud region for OneDrive (default "global") - --onedrive-root-folder-id string ID of the root folder - --onedrive-server-side-across-configs Allow server-side operations (e.g. 
copy) to work across different onedrive configs - --onedrive-token string OAuth Access Token as a JSON blob - --onedrive-token-url string Token server url - --oos-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi) - --oos-compartment string Object storage compartment OCID - --oos-config-file string Path to OCI config file (default "~/.oci/config") - --oos-config-profile string Profile name inside the oci config file (default "Default") - --oos-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi) - --oos-copy-timeout Duration Timeout for copy (default 1m0s) - --oos-disable-checksum Don't store MD5 checksum with object metadata - --oos-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot) - --oos-endpoint string Endpoint for Object storage API - --oos-leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery - --oos-namespace string Object storage namespace - --oos-no-check-bucket If set, don't attempt to check the bucket exists or create it - --oos-provider string Choose your Auth Provider (default "env_auth") - --oos-region string Object storage Region - --oos-upload-concurrency int Concurrency for multipart uploads (default 10) - --oos-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi) - --opendrive-chunk-size SizeSuffix Files will be uploaded in chunks this size (default 10Mi) - --opendrive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot) - --opendrive-password string Password (obscured) - --opendrive-username string Username - --pcloud-auth-url string Auth server URL - --pcloud-client-id string OAuth Client Id - --pcloud-client-secret string OAuth Client Secret - --pcloud-encoding MultiEncoder The encoding for the backend (default 
Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot) - --pcloud-hostname string Hostname to connect to (default "api.pcloud.com") - --pcloud-password string Your pcloud password (obscured) - --pcloud-root-folder-id string Fill in for rclone to use a non root folder as its starting point (default "d0") - --pcloud-token string OAuth Access Token as a JSON blob - --pcloud-token-url string Token server url - --pcloud-username string Your pcloud username - --premiumizeme-encoding MultiEncoder The encoding for the backend (default Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot) - --putio-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot) - --qingstor-access-key-id string QingStor Access Key ID - --qingstor-chunk-size SizeSuffix Chunk size to use for uploading (default 4Mi) - --qingstor-connection-retries int Number of connection retries (default 3) - --qingstor-encoding MultiEncoder The encoding for the backend (default Slash,Ctl,InvalidUtf8) - --qingstor-endpoint string Enter an endpoint URL to connection QingStor API - --qingstor-env-auth Get QingStor credentials from runtime - --qingstor-secret-access-key string QingStor Secret Access Key (password) - --qingstor-upload-concurrency int Concurrency for multipart uploads (default 1) - --qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi) - --qingstor-zone string Zone to connect to - --s3-access-key-id string AWS Access Key ID - --s3-acl string Canned ACL used when creating buckets and storing or copying objects - --s3-bucket-acl string Canned ACL used when creating buckets - --s3-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi) - --s3-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi) - --s3-decompress If set this will decompress gzip encoded objects - --s3-disable-checksum Don't store MD5 checksum with object metadata - --s3-disable-http2 Disable usage of http2 for S3 backends - 
--s3-download-url string Custom endpoint for downloads - --s3-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot) - --s3-endpoint string Endpoint for S3 API - --s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars) - --s3-force-path-style If true use path style access if false use virtual hosted style (default true) - --s3-leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery - --s3-list-chunk int Size of listing chunk (response list for each ListObject S3 request) (default 1000) - --s3-list-url-encode Tristate Whether to url encode listings: true/false/unset (default unset) - --s3-list-version int Version of ListObjects to use: 1,2 or 0 for auto - --s3-location-constraint string Location constraint - must be set to match the Region - --s3-max-upload-parts int Maximum number of parts in a multipart upload (default 10000) - --s3-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s) - --s3-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool - --s3-no-check-bucket If set, don't attempt to check the bucket exists or create it - --s3-no-head If set, don't HEAD uploaded objects to check integrity - --s3-no-head-object If set, do not do HEAD before GET when getting objects - --s3-no-system-metadata Suppress setting and reading of system metadata - --s3-profile string Profile to use in the shared credentials file - --s3-provider string Choose your S3 provider - --s3-region string Region to connect to - --s3-requester-pays Enables requester pays option when interacting with S3 bucket - --s3-secret-access-key string AWS Secret Access Key (password) - --s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3 - --s3-session-token string An AWS session token - --s3-shared-credentials-file string Path 
to the shared credentials file - --s3-sse-customer-algorithm string If using SSE-C, the server-side encryption algorithm used when storing this object in S3 - --s3-sse-customer-key string To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data - --s3-sse-customer-key-base64 string If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data - --s3-sse-customer-key-md5 string If using SSE-C you may provide the secret encryption key MD5 checksum (optional) - --s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key - --s3-storage-class string The storage class to use when storing new objects in S3 - --s3-upload-concurrency int Concurrency for multipart uploads (default 4) - --s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi) - --s3-use-accelerate-endpoint If true use the AWS S3 accelerated endpoint - --s3-use-multipart-etag Tristate Whether to use ETag in multipart uploads for verification (default unset) - --s3-use-presigned-request Whether to use a presigned request or PutObject for single part uploads - --s3-v2-auth If true use v2 authentication - --s3-version-at Time Show file versions as they were at the specified time (default off) - --s3-versions Include old versions in directory listings - --seafile-2fa Two-factor authentication ('true' if the account has 2FA enabled) - --seafile-create-library Should rclone create a library if it doesn't exist - --seafile-encoding MultiEncoder The encoding for the backend (default Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8) - --seafile-library string Name of the library - --seafile-library-key string Library password (for encrypted libraries only) (obscured) - --seafile-pass string Password (obscured) - --seafile-url string URL of seafile host to connect to - --seafile-user string User name (usually email address) - --sftp-ask-password Allow asking for SFTP password when needed - 
--sftp-chunk-size SizeSuffix Upload and download chunk size (default 32Ki) - --sftp-concurrency int The maximum number of outstanding requests for one file (default 64) - --sftp-disable-concurrent-reads If set don't use concurrent reads - --sftp-disable-concurrent-writes If set don't use concurrent writes - --sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available - --sftp-host string SSH host to connect to - --sftp-idle-timeout Duration Max time before closing idle connections (default 1m0s) - --sftp-key-file string Path to PEM-encoded private key file - --sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file (obscured) - --sftp-key-pem string Raw PEM-encoded private key - --sftp-key-use-agent When set forces the usage of the ssh-agent - --sftp-known-hosts-file string Optional path to known_hosts file - --sftp-md5sum-command string The command used to read md5 hashes - --sftp-pass string SSH password, leave blank to use ssh-agent (obscured) - --sftp-path-override string Override path used by SSH shell commands - --sftp-port int SSH port number (default 22) - --sftp-pubkey-file string Optional path to public key file - --sftp-server-command string Specifies the path or command to run a sftp server on the remote host - --sftp-set-env SpaceSepList Environment variables to pass to sftp and commands - --sftp-set-modtime Set the modified time on the remote if set (default true) - --sftp-sha1sum-command string The command used to read sha1 hashes - --sftp-shell-type string The type of SSH shell on remote server, if any - --sftp-skip-links Set to skip any symlinks and any other non regular files - --sftp-subsystem string Specifies the SSH2 subsystem on the remote host (default "sftp") - --sftp-use-fstat If set use fstat instead of stat - --sftp-use-insecure-cipher Enable the use of insecure ciphers and key exchange methods - --sftp-user string SSH username (default "$USER") - 
--sharefile-chunk-size SizeSuffix Upload chunk size (default 64Mi) - --sharefile-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,LeftPeriod,RightSpace,RightPeriod,InvalidUtf8,Dot) - --sharefile-endpoint string Endpoint for API calls - --sharefile-root-folder-id string ID of the root folder - --sharefile-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (default 128Mi) - --sia-api-password string Sia Daemon API Password (obscured) - --sia-api-url string Sia daemon API URL, like http://sia.daemon.host:9980 (default "http://127.0.0.1:9980") - --sia-encoding MultiEncoder The encoding for the backend (default Slash,Question,Hash,Percent,Del,Ctl,InvalidUtf8,Dot) - --sia-user-agent string Siad User Agent (default "Sia-Agent") - --skip-links Don't warn about skipped symlinks - --smb-case-insensitive Whether the server is configured to be case-insensitive (default true) - --smb-domain string Domain name for NTLM authentication (default "WORKGROUP") - --smb-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot) - --smb-hide-special-share Hide special shares (e.g. 
print$) which users aren't supposed to access (default true) - --smb-host string SMB server hostname to connect to - --smb-idle-timeout Duration Max time before closing idle connections (default 1m0s) - --smb-pass string SMB password (obscured) - --smb-port int SMB port number (default 445) - --smb-user string SMB username (default "$USER") - --storj-access-grant string Access grant - --storj-api-key string API key - --storj-passphrase string Encryption passphrase - --storj-provider string Choose an authentication method (default "existing") - --storj-satellite-address string Satellite address (default "us-central-1.storj.io") - --sugarsync-access-key-id string Sugarsync Access Key ID - --sugarsync-app-id string Sugarsync App ID - --sugarsync-authorization string Sugarsync authorization - --sugarsync-authorization-expiry string Sugarsync authorization expiry - --sugarsync-deleted-id string Sugarsync deleted folder id - --sugarsync-encoding MultiEncoder The encoding for the backend (default Slash,Ctl,InvalidUtf8,Dot) - --sugarsync-hard-delete Permanently delete files if true - --sugarsync-private-access-key string Sugarsync Private Access Key - --sugarsync-refresh-token string Sugarsync refresh token - --sugarsync-root-id string Sugarsync root id - --sugarsync-user string Sugarsync user - --swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID) - --swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME) - --swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET) - --swift-auth string Authentication URL for server (OS_AUTH_URL) - --swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN) - --swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION) - --swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments 
container (default 5Gi) - --swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME) - --swift-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8) - --swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public") - --swift-env-auth Get swift credentials from environment variables in standard OpenStack form - --swift-key string API key or password (OS_PASSWORD) - --swift-leave-parts-on-error If true avoid calling abort upload on a failure - --swift-no-chunk Don't chunk files during streaming upload - --swift-no-large-objects Disable support for static and dynamic large objects - --swift-region string Region name - optional (OS_REGION_NAME) - --swift-storage-policy string The storage policy to use when creating a new container - --swift-storage-url string Storage URL - optional (OS_STORAGE_URL) - --swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME) - --swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME) - --swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID) - --swift-user string User name to log in (OS_USERNAME) - --swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID) - --union-action-policy string Policy to choose upstream on ACTION category (default "epall") - --union-cache-time int Cache time of usage and free space (in seconds) (default 120) - --union-create-policy string Policy to choose upstream on CREATE category (default "epmfs") - --union-min-free-space SizeSuffix Minimum viable free space for lfs/eplfs policies (default 1Gi) - --union-search-policy string Policy to choose upstream on SEARCH category (default "ff") - --union-upstreams string List of space separated upstreams - --uptobox-access-token string Your access token - 
--uptobox-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot) - --webdav-bearer-token string Bearer token instead of user/pass (e.g. a Macaroon) - --webdav-bearer-token-command string Command to run to get a bearer token - --webdav-encoding string The encoding for the backend - --webdav-headers CommaSepList Set HTTP headers for all transactions - --webdav-pass string Password (obscured) - --webdav-url string URL of http host to connect to - --webdav-user string User name - --webdav-vendor string Name of the WebDAV site/service/software you are using - --yandex-auth-url string Auth server URL - --yandex-client-id string OAuth Client Id - --yandex-client-secret string OAuth Client Secret - --yandex-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot) - --yandex-hard-delete Delete files permanently rather than putting them into the trash - --yandex-token string OAuth Access Token as a JSON blob - --yandex-token-url string Token server url - --zoho-auth-url string Auth server URL - --zoho-client-id string OAuth Client Id - --zoho-client-secret string OAuth Client Secret - --zoho-encoding MultiEncoder The encoding for the backend (default Del,Ctl,InvalidUtf8) - --zoho-region string Zoho region to connect to - --zoho-token string OAuth Access Token as a JSON blob - --zoho-token-url string Token server url + --acd-auth-url string Auth server URL + --acd-client-id string OAuth Client Id + --acd-client-secret string OAuth Client Secret + --acd-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot) + --acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink (default 9Gi) + --acd-token string OAuth Access Token as a JSON blob + --acd-token-url string Token server url + --acd-upload-wait-per-gb Duration Additional time per GiB to wait after a failed complete upload to see if it appears (default 3m0s) + 
--alias-remote string Remote or path to alias + --azureblob-access-tier string Access tier of blob: hot, cool or archive + --azureblob-account string Azure Storage Account Name + --azureblob-archive-tier-delete Delete archive tier blobs before overwriting + --azureblob-chunk-size SizeSuffix Upload chunk size (default 4Mi) + --azureblob-client-certificate-password string Password for the certificate file (optional) (obscured) + --azureblob-client-certificate-path string Path to a PEM or PKCS12 certificate file including the private key + --azureblob-client-id string The ID of the client in use + --azureblob-client-secret string One of the service principal's client secrets + --azureblob-client-send-certificate-chain Send the certificate chain when using certificate auth + --azureblob-disable-checksum Don't store MD5 checksum with object metadata + --azureblob-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8) + --azureblob-endpoint string Endpoint for the service + --azureblob-env-auth Read credentials from runtime (environment variables, CLI or MSI) + --azureblob-key string Storage Account Shared Key + --azureblob-list-chunk int Size of blob list (default 5000) + --azureblob-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s) + --azureblob-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool + --azureblob-msi-client-id string Object ID of the user-assigned MSI to use, if any + --azureblob-msi-mi-res-id string Azure resource ID of the user-assigned MSI to use, if any + --azureblob-msi-object-id string Object ID of the user-assigned MSI to use, if any + --azureblob-no-check-container If set, don't attempt to check the container exists or create it + --azureblob-no-head-object If set, do not do HEAD before GET when getting objects + --azureblob-password string The user's password (obscured) + --azureblob-public-access string Public access 
level of a container: blob or container + --azureblob-sas-url string SAS URL for container level access only + --azureblob-service-principal-file string Path to file containing credentials for use with a service principal + --azureblob-tenant string ID of the service principal's tenant. Also called its directory ID + --azureblob-upload-concurrency int Concurrency for multipart uploads (default 16) + --azureblob-upload-cutoff string Cutoff for switching to chunked upload (<= 256 MiB) (deprecated) + --azureblob-use-emulator Uses local storage emulator if provided as 'true' + --azureblob-use-msi Use a managed service identity to authenticate (only works in Azure) + --azureblob-username string User name (usually an email address) + --b2-account string Account ID or Application Key ID + --b2-chunk-size SizeSuffix Upload chunk size (default 96Mi) + --b2-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4Gi) + --b2-disable-checksum Disable checksums for large (> upload cutoff) files + --b2-download-auth-duration Duration Time before the authorization token will expire in s or suffix ms|s|m|h|d (default 1w) + --b2-download-url string Custom endpoint for downloads + --b2-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot) + --b2-endpoint string Endpoint for the service + --b2-hard-delete Permanently delete files on remote removal, otherwise hide files + --b2-key string Application Key + --b2-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s) + --b2-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool + --b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging + --b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi) + --b2-version-at Time Show file versions as they were at the specified time (default off) + --b2-versions Include old versions in directory listings + --box-access-token 
string Box App Primary Access Token + --box-auth-url string Auth server URL + --box-box-config-file string Box App config.json location + --box-box-sub-type string (default "user") + --box-client-id string OAuth Client Id + --box-client-secret string OAuth Client Secret + --box-commit-retries int Max number of times to try committing a multipart file (default 100) + --box-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot) + --box-list-chunk int Size of listing chunk 1-1000 (default 1000) + --box-owned-by string Only show items owned by the login (email address) passed in + --box-root-folder-id string Fill in for rclone to use a non root folder as its starting point + --box-token string OAuth Access Token as a JSON blob + --box-token-url string Token server url + --box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50 MiB) (default 50Mi) + --cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage (default 1m0s) + --cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming + --cache-chunk-path string Directory to cache chunk files (default "$HOME/.cache/rclone/cache-backend") + --cache-chunk-size SizeSuffix The size of a chunk (partial file data) (default 5Mi) + --cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk (default 10Gi) + --cache-db-path string Directory to store file structure metadata DB (default "$HOME/.cache/rclone/cache-backend") + --cache-db-purge Clear all the cached data for this remote on start + --cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s) + --cache-info-age Duration How long to cache file structure information (directory listings, file size, times, etc.) 
(default 6h0m0s) + --cache-plex-insecure string Skip all certificate verification when connecting to the Plex server + --cache-plex-password string The password of the Plex user (obscured) + --cache-plex-url string The URL of the Plex server + --cache-plex-username string The username of the Plex user + --cache-read-retries int How many times to retry a read from a cache storage (default 10) + --cache-remote string Remote to cache + --cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1) + --cache-tmp-upload-path string Directory to keep temporary files until they are uploaded + --cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s) + --cache-workers int How many workers should run in parallel to download chunks (default 4) + --cache-writes Cache file data on writes through the FS + --chunker-chunk-size SizeSuffix Files larger than chunk size will be split in chunks (default 2Gi) + --chunker-fail-hard Choose how chunker should handle files with missing or invalid chunks + --chunker-hash-type string Choose how chunker handles hash sums (default "md5") + --chunker-remote string Remote to chunk/unchunk + --combine-upstreams SpaceSepList Upstreams for combining + --compress-level int GZIP compression level (-2 to 9) (default -1) + --compress-mode string Compression mode (default "gzip") + --compress-ram-cache-limit SizeSuffix Some remotes don't allow the upload of files with unknown size (default 20Mi) + --compress-remote string Remote to compress + -L, --copy-links Follow symlinks and copy the pointed to item + --crypt-directory-name-encryption Option to either encrypt directory names or leave them intact (default true) + --crypt-filename-encoding string How to encode the encrypted filename to text string (default "base32") + --crypt-filename-encryption string How to encrypt the filenames (default "standard") + --crypt-no-data-encryption Option to either encrypt 
file data or leave it unencrypted + --crypt-password string Password or pass phrase for encryption (obscured) + --crypt-password2 string Password or pass phrase for salt (obscured) + --crypt-remote string Remote to encrypt/decrypt + --crypt-server-side-across-configs Allow server-side operations (e.g. copy) to work across different crypt configs + --crypt-show-mapping For all files listed show how the names encrypt + --drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded + --drive-allow-import-name-change Allow the filetype to change when uploading Google docs + --drive-auth-owner-only Only consider files owned by the authenticated user + --drive-auth-url string Auth server URL + --drive-chunk-size SizeSuffix Upload chunk size (default 8Mi) + --drive-client-id string Google Application Client Id + --drive-client-secret string OAuth Client Secret + --drive-copy-shortcut-content Server side copy contents of shortcuts instead of the shortcut + --drive-disable-http2 Disable drive using http2 (default true) + --drive-encoding MultiEncoder The encoding for the backend (default InvalidUtf8) + --drive-export-formats string Comma separated list of preferred formats for downloading Google docs (default "docx,xlsx,pptx,svg") + --drive-formats string Deprecated: See export_formats + --drive-impersonate string Impersonate this user when using a service account + --drive-import-formats string Comma separated list of preferred formats for uploading Google docs + --drive-keep-revision-forever Keep new head revision of each file forever + --drive-list-chunk int Size of listing chunk 100-1000, 0 to disable (default 1000) + --drive-pacer-burst int Number of API calls to allow without sleeping (default 100) + --drive-pacer-min-sleep Duration Minimum time to sleep between API calls (default 100ms) + --drive-resource-key string Resource key for accessing a link-shared file + --drive-root-folder-id string ID of the root folder + --drive-scope 
string Scope that rclone should use when requesting access from drive + --drive-server-side-across-configs Allow server-side operations (e.g. copy) to work across different drive configs + --drive-service-account-credentials string Service Account Credentials JSON blob + --drive-service-account-file string Service Account Credentials JSON file path + --drive-shared-with-me Only show files that are shared with me + --drive-size-as-quota Show sizes as storage quota usage, not actual size + --drive-skip-checksum-gphotos Skip MD5 checksum on Google photos and videos only + --drive-skip-dangling-shortcuts If set skip dangling shortcut files + --drive-skip-gdocs Skip google documents in all listings + --drive-skip-shortcuts If set skip shortcut files + --drive-starred-only Only show files that are starred + --drive-stop-on-download-limit Make download limit errors be fatal + --drive-stop-on-upload-limit Make upload limit errors be fatal + --drive-team-drive string ID of the Shared Drive (Team Drive) + --drive-token string OAuth Access Token as a JSON blob + --drive-token-url string Token server url + --drive-trashed-only Only show files that are in the trash + --drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8Mi) + --drive-use-created-date Use file created date instead of modified date + --drive-use-shared-date Use date file was shared instead of modified date + --drive-use-trash Send files to the trash instead of deleting permanently (default true) + --drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download (default off) + --dropbox-auth-url string Auth server URL + --dropbox-batch-commit-timeout Duration Max time to wait for a batch to finish committing (default 10m0s) + --dropbox-batch-mode string Upload file batching sync|async|off (default "sync") + --dropbox-batch-size int Max number of files in upload batch + --dropbox-batch-timeout Duration Max time to allow an idle upload batch before uploading 
(default 0s) + --dropbox-chunk-size SizeSuffix Upload chunk size (< 150Mi) (default 48Mi) + --dropbox-client-id string OAuth Client Id + --dropbox-client-secret string OAuth Client Secret + --dropbox-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot) + --dropbox-impersonate string Impersonate this user when using a business account + --dropbox-shared-files Instructs rclone to work on individual shared files + --dropbox-shared-folders Instructs rclone to work on shared folders + --dropbox-token string OAuth Access Token as a JSON blob + --dropbox-token-url string Token server url + --fichier-api-key string Your API Key, get it from https://1fichier.com/console/params.pl + --fichier-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot) + --fichier-file-password string If you want to download a shared file that is password protected, add this parameter (obscured) + --fichier-folder-password string If you want to list the files in a shared folder that is password protected, add this parameter (obscured) + --fichier-shared-folder string If you want to download a shared folder, add this parameter + --filefabric-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot) + --filefabric-permanent-token string Permanent Authentication Token + --filefabric-root-folder-id string ID of the root folder + --filefabric-token string Session Token + --filefabric-token-expiry string Token expiry time + --filefabric-url string URL of the Enterprise File Fabric to connect to + --filefabric-version string Version read from the file fabric + --ftp-ask-password Allow asking for FTP password when needed + --ftp-close-timeout Duration Maximum time to wait for a response to close (default 1m0s) + --ftp-concurrency int Maximum number of FTP simultaneous connections, 0 for unlimited + 
--ftp-disable-epsv Disable using EPSV even if server advertises support + --ftp-disable-mlsd Disable using MLSD even if server advertises support + --ftp-disable-tls13 Disable TLS 1.3 (workaround for FTP servers with buggy TLS) + --ftp-disable-utf8 Disable using UTF-8 even if server advertises support + --ftp-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,RightSpace,Dot) + --ftp-explicit-tls Use Explicit FTPS (FTP over TLS) + --ftp-force-list-hidden Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD + --ftp-host string FTP host to connect to + --ftp-idle-timeout Duration Max time before closing idle connections (default 1m0s) + --ftp-no-check-certificate Do not verify the TLS certificate of the server + --ftp-pass string FTP password (obscured) + --ftp-port int FTP port number (default 21) + --ftp-shut-timeout Duration Maximum time to wait for data connection closing status (default 1m0s) + --ftp-tls Use Implicit FTPS (FTP over TLS) + --ftp-tls-cache-size int Size of TLS session cache for all control and data connections (default 32) + --ftp-user string FTP username (default "$USER") + --ftp-writing-mdtm Use MDTM to set modification time (VsFtpd quirk) + --gcs-anonymous Access public buckets and objects without credentials + --gcs-auth-url string Auth server URL + --gcs-bucket-acl string Access Control List for new buckets + --gcs-bucket-policy-only Access checks should use bucket-level IAM policies + --gcs-client-id string OAuth Client Id + --gcs-client-secret string OAuth Client Secret + --gcs-decompress If set this will decompress gzip encoded objects + --gcs-encoding MultiEncoder The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot) + --gcs-endpoint string Endpoint for the service + --gcs-location string Location for the newly created buckets + --gcs-no-check-bucket If set, don't attempt to check the bucket exists or create it + --gcs-object-acl string Access Control List for new 
objects + --gcs-project-number string Project number + --gcs-service-account-file string Service Account Credentials JSON file path + --gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage + --gcs-token string OAuth Access Token as a JSON blob + --gcs-token-url string Token server url + --gphotos-auth-url string Auth server URL + --gphotos-client-id string OAuth Client Id + --gphotos-client-secret string OAuth Client Secret + --gphotos-encoding MultiEncoder The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot) + --gphotos-include-archived Also view and download archived media + --gphotos-read-only Set to make the Google Photos backend read only + --gphotos-read-size Set to read the size of media items + --gphotos-start-year int Year limits the photos to be downloaded to those which are uploaded after the given year (default 2000) + --gphotos-token string OAuth Access Token as a JSON blob + --gphotos-token-url string Token server url + --hasher-auto-size SizeSuffix Auto-update checksum for files smaller than this size (disabled by default) + --hasher-hashes CommaSepList Comma separated list of supported checksum types (default md5,sha1) + --hasher-max-age Duration Maximum time to keep checksums in cache (0 = no cache, off = cache forever) (default off) + --hasher-remote string Remote to cache checksums for (e.g. 
myRemote:path) + --hdfs-data-transfer-protection string Kerberos data transfer protection: authentication|integrity|privacy + --hdfs-encoding MultiEncoder The encoding for the backend (default Slash,Colon,Del,Ctl,InvalidUtf8,Dot) + --hdfs-namenode string Hadoop name node and port + --hdfs-service-principal-name string Kerberos service principal name for the namenode + --hdfs-username string Hadoop user name + --hidrive-auth-url string Auth server URL + --hidrive-chunk-size SizeSuffix Chunksize for chunked uploads (default 48Mi) + --hidrive-client-id string OAuth Client Id + --hidrive-client-secret string OAuth Client Secret + --hidrive-disable-fetching-member-count Do not fetch number of objects in directories unless it is absolutely necessary + --hidrive-encoding MultiEncoder The encoding for the backend (default Slash,Dot) + --hidrive-endpoint string Endpoint for the service (default "https://api.hidrive.strato.com/2.1") + --hidrive-root-prefix string The root/parent folder for all paths (default "/") + --hidrive-scope-access string Access permissions that rclone should use when requesting access from HiDrive (default "rw") + --hidrive-scope-role string User-level that rclone should use when requesting access from HiDrive (default "user") + --hidrive-token string OAuth Access Token as a JSON blob + --hidrive-token-url string Token server url + --hidrive-upload-concurrency int Concurrency for chunked uploads (default 4) + --hidrive-upload-cutoff SizeSuffix Cutoff/Threshold for chunked uploads (default 96Mi) + --http-headers CommaSepList Set HTTP headers for all transactions + --http-no-head Don't use HEAD requests + --http-no-slash Set this if the site doesn't end directories with / + --http-url string URL of HTTP host to connect to + --internetarchive-access-key-id string IAS3 Access Key + --internetarchive-disable-checksum Don't ask the server to test against MD5 checksum calculated by rclone (default true) + --internetarchive-encoding MultiEncoder The encoding 
for the backend (default Slash,LtGt,CrLf,Del,Ctl,InvalidUtf8,Dot) + --internetarchive-endpoint string IAS3 Endpoint (default "https://s3.us.archive.org") + --internetarchive-front-endpoint string Host of InternetArchive Frontend (default "https://archive.org") + --internetarchive-secret-access-key string IAS3 Secret Key (password) + --internetarchive-wait-archive Duration Timeout for waiting the server's processing tasks (specifically archive and book_op) to finish (default 0s) + --jottacloud-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot) + --jottacloud-hard-delete Delete files permanently rather than putting them into the trash + --jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required (default 10Mi) + --jottacloud-no-versions Avoid server side versioning by deleting files and recreating files instead of overwriting them + --jottacloud-trashed-only Only show files that are in the trash + --jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fail's (default 10Mi) + --koofr-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot) + --koofr-endpoint string The Koofr API endpoint to use + --koofr-mountid string Mount ID of the mount to use + --koofr-password string Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password) (obscured) + --koofr-provider string Choose your storage provider + --koofr-setmtime Does the backend support setting modification time (default true) + --koofr-user string Your user name + -l, --links Translate symlinks to/from regular files with a '.rclonelink' extension + --local-case-insensitive Force the filesystem to report itself as case insensitive + --local-case-sensitive Force the filesystem to report itself as case sensitive + --local-encoding MultiEncoder The 
encoding for the backend (default Slash,Dot) + --local-no-check-updated Don't check to see if the files change during upload + --local-no-preallocate Disable preallocation of disk space for transferred files + --local-no-set-modtime Disable setting modtime + --local-no-sparse Disable sparse files for multi-thread downloads + --local-nounc Disable UNC (long path names) conversion on Windows + --local-unicode-normalization Apply unicode NFC normalization to paths and filenames + --local-zero-size-links Assume the Stat size of links is zero (and read them instead) (deprecated) + --mailru-check-hash What should copy do if file checksum is mismatched or invalid (default true) + --mailru-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot) + --mailru-pass string Password (obscured) + --mailru-speedup-enable Skip full upload if there is another file with same data hash (default true) + --mailru-speedup-file-patterns string Comma separated list of file name patterns eligible for speedup (put by hash) (default "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf") + --mailru-speedup-max-disk SizeSuffix This option allows you to disable speedup (put by hash) for large files (default 3Gi) + --mailru-speedup-max-memory SizeSuffix Files larger than the size given below will always be hashed on disk (default 32Mi) + --mailru-user string User name (usually email) + --mega-debug Output more debug from Mega + --mega-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot) + --mega-hard-delete Delete files permanently rather than putting them into the trash + --mega-pass string Password (obscured) + --mega-user string User name + --netstorage-account string Set the NetStorage account name + --netstorage-host string Domain+path of NetStorage host to connect to + --netstorage-protocol string Select between HTTP or HTTPS protocol (default "https") + --netstorage-secret 
string Set the NetStorage account secret/G2O key for authentication (obscured) + -x, --one-file-system Don't cross filesystem boundaries (unix/macOS only) + --onedrive-access-scopes SpaceSepList Set scopes to be requested by rclone (default Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access) + --onedrive-auth-url string Auth server URL + --onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k (327,680 bytes) (default 10Mi) + --onedrive-client-id string OAuth Client Id + --onedrive-client-secret string OAuth Client Secret + --onedrive-drive-id string The ID of the drive to use + --onedrive-drive-type string The type of the drive (personal | business | documentLibrary) + --onedrive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot) + --onedrive-expose-onenote-files Set to make OneNote files show up in directory listings + --onedrive-link-password string Set the password for links created by the link command + --onedrive-link-scope string Set the scope of the links created by the link command (default "anonymous") + --onedrive-link-type string Set the type of the links created by the link command (default "view") + --onedrive-list-chunk int Size of listing chunk (default 1000) + --onedrive-no-versions Remove all versions on modifying operations + --onedrive-region string Choose national cloud region for OneDrive (default "global") + --onedrive-root-folder-id string ID of the root folder + --onedrive-server-side-across-configs Allow server-side operations (e.g. 
copy) to work across different onedrive configs + --onedrive-token string OAuth Access Token as a JSON blob + --onedrive-token-url string Token server url + --oos-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi) + --oos-compartment string Object storage compartment OCID + --oos-config-file string Path to OCI config file (default "~/.oci/config") + --oos-config-profile string Profile name inside the oci config file (default "Default") + --oos-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi) + --oos-copy-timeout Duration Timeout for copy (default 1m0s) + --oos-disable-checksum Don't store MD5 checksum with object metadata + --oos-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot) + --oos-endpoint string Endpoint for Object storage API + --oos-leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery + --oos-namespace string Object storage namespace + --oos-no-check-bucket If set, don't attempt to check the bucket exists or create it + --oos-provider string Choose your Auth Provider (default "env_auth") + --oos-region string Object storage Region + --oos-upload-concurrency int Concurrency for multipart uploads (default 10) + --oos-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi) + --opendrive-chunk-size SizeSuffix Files will be uploaded in chunks this size (default 10Mi) + --opendrive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot) + --opendrive-password string Password (obscured) + --opendrive-username string Username + --pcloud-auth-url string Auth server URL + --pcloud-client-id string OAuth Client Id + --pcloud-client-secret string OAuth Client Secret + --pcloud-encoding MultiEncoder The encoding for the backend (default 
Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot) + --pcloud-hostname string Hostname to connect to (default "api.pcloud.com") + --pcloud-password string Your pcloud password (obscured) + --pcloud-root-folder-id string Fill in for rclone to use a non root folder as its starting point (default "d0") + --pcloud-token string OAuth Access Token as a JSON blob + --pcloud-token-url string Token server url + --pcloud-username string Your pcloud username + --premiumizeme-encoding MultiEncoder The encoding for the backend (default Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot) + --putio-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot) + --qingstor-access-key-id string QingStor Access Key ID + --qingstor-chunk-size SizeSuffix Chunk size to use for uploading (default 4Mi) + --qingstor-connection-retries int Number of connection retries (default 3) + --qingstor-encoding MultiEncoder The encoding for the backend (default Slash,Ctl,InvalidUtf8) + --qingstor-endpoint string Enter an endpoint URL to connection QingStor API + --qingstor-env-auth Get QingStor credentials from runtime + --qingstor-secret-access-key string QingStor Secret Access Key (password) + --qingstor-upload-concurrency int Concurrency for multipart uploads (default 1) + --qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi) + --qingstor-zone string Zone to connect to + --s3-access-key-id string AWS Access Key ID + --s3-acl string Canned ACL used when creating buckets and storing or copying objects + --s3-bucket-acl string Canned ACL used when creating buckets + --s3-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi) + --s3-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi) + --s3-decompress If set this will decompress gzip encoded objects + --s3-disable-checksum Don't store MD5 checksum with object metadata + --s3-disable-http2 Disable usage of http2 for S3 backends + 
--s3-download-url string Custom endpoint for downloads + --s3-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot) + --s3-endpoint string Endpoint for S3 API + --s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars) + --s3-force-path-style If true use path style access if false use virtual hosted style (default true) + --s3-leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery + --s3-list-chunk int Size of listing chunk (response list for each ListObject S3 request) (default 1000) + --s3-list-url-encode Tristate Whether to url encode listings: true/false/unset (default unset) + --s3-list-version int Version of ListObjects to use: 1,2 or 0 for auto + --s3-location-constraint string Location constraint - must be set to match the Region + --s3-max-upload-parts int Maximum number of parts in a multipart upload (default 10000) + --s3-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s) + --s3-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool + --s3-might-gzip Tristate Set this if the backend might gzip objects (default unset) + --s3-no-check-bucket If set, don't attempt to check the bucket exists or create it + --s3-no-head If set, don't HEAD uploaded objects to check integrity + --s3-no-head-object If set, do not do HEAD before GET when getting objects + --s3-no-system-metadata Suppress setting and reading of system metadata + --s3-profile string Profile to use in the shared credentials file + --s3-provider string Choose your S3 provider + --s3-region string Region to connect to + --s3-requester-pays Enables requester pays option when interacting with S3 bucket + --s3-secret-access-key string AWS Secret Access Key (password) + --s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3 + 
--s3-session-token string An AWS session token + --s3-shared-credentials-file string Path to the shared credentials file + --s3-sse-customer-algorithm string If using SSE-C, the server-side encryption algorithm used when storing this object in S3 + --s3-sse-customer-key string To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data + --s3-sse-customer-key-base64 string If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data + --s3-sse-customer-key-md5 string If using SSE-C you may provide the secret encryption key MD5 checksum (optional) + --s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key + --s3-storage-class string The storage class to use when storing new objects in S3 + --s3-upload-concurrency int Concurrency for multipart uploads (default 4) + --s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi) + --s3-use-accelerate-endpoint If true use the AWS S3 accelerated endpoint + --s3-use-multipart-etag Tristate Whether to use ETag in multipart uploads for verification (default unset) + --s3-use-presigned-request Whether to use a presigned request or PutObject for single part uploads + --s3-v2-auth If true use v2 authentication + --s3-version-at Time Show file versions as they were at the specified time (default off) + --s3-versions Include old versions in directory listings + --seafile-2fa Two-factor authentication ('true' if the account has 2FA enabled) + --seafile-create-library Should rclone create a library if it doesn't exist + --seafile-encoding MultiEncoder The encoding for the backend (default Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8) + --seafile-library string Name of the library + --seafile-library-key string Library password (for encrypted libraries only) (obscured) + --seafile-pass string Password (obscured) + --seafile-url string URL of seafile host to connect to + --seafile-user string User name (usually 
email address) + --sftp-ask-password Allow asking for SFTP password when needed + --sftp-chunk-size SizeSuffix Upload and download chunk size (default 32Ki) + --sftp-ciphers SpaceSepList Space separated list of ciphers to be used for session encryption, ordered by preference + --sftp-concurrency int The maximum number of outstanding requests for one file (default 64) + --sftp-disable-concurrent-reads If set don't use concurrent reads + --sftp-disable-concurrent-writes If set don't use concurrent writes + --sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available + --sftp-host string SSH host to connect to + --sftp-idle-timeout Duration Max time before closing idle connections (default 1m0s) + --sftp-key-exchange SpaceSepList Space separated list of key exchange algorithms, ordered by preference + --sftp-key-file string Path to PEM-encoded private key file + --sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file (obscured) + --sftp-key-pem string Raw PEM-encoded private key + --sftp-key-use-agent When set forces the usage of the ssh-agent + --sftp-known-hosts-file string Optional path to known_hosts file + --sftp-macs SpaceSepList Space separated list of MACs (message authentication code) algorithms, ordered by preference + --sftp-md5sum-command string The command used to read md5 hashes + --sftp-pass string SSH password, leave blank to use ssh-agent (obscured) + --sftp-path-override string Override path used by SSH shell commands + --sftp-port int SSH port number (default 22) + --sftp-pubkey-file string Optional path to public key file + --sftp-server-command string Specifies the path or command to run a sftp server on the remote host + --sftp-set-env SpaceSepList Environment variables to pass to sftp and commands + --sftp-set-modtime Set the modified time on the remote if set (default true) + --sftp-sha1sum-command string The command used to read sha1 hashes + --sftp-shell-type 
string The type of SSH shell on remote server, if any + --sftp-skip-links Set to skip any symlinks and any other non regular files + --sftp-subsystem string Specifies the SSH2 subsystem on the remote host (default "sftp") + --sftp-use-fstat If set use fstat instead of stat + --sftp-use-insecure-cipher Enable the use of insecure ciphers and key exchange methods + --sftp-user string SSH username (default "$USER") + --sharefile-chunk-size SizeSuffix Upload chunk size (default 64Mi) + --sharefile-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,LeftPeriod,RightSpace,RightPeriod,InvalidUtf8,Dot) + --sharefile-endpoint string Endpoint for API calls + --sharefile-root-folder-id string ID of the root folder + --sharefile-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (default 128Mi) + --sia-api-password string Sia Daemon API Password (obscured) + --sia-api-url string Sia daemon API URL, like http://sia.daemon.host:9980 (default "http://127.0.0.1:9980") + --sia-encoding MultiEncoder The encoding for the backend (default Slash,Question,Hash,Percent,Del,Ctl,InvalidUtf8,Dot) + --sia-user-agent string Siad User Agent (default "Sia-Agent") + --skip-links Don't warn about skipped symlinks + --smb-case-insensitive Whether the server is configured to be case-insensitive (default true) + --smb-domain string Domain name for NTLM authentication (default "WORKGROUP") + --smb-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot) + --smb-hide-special-share Hide special shares (e.g. 
print$) which users aren't supposed to access (default true) + --smb-host string SMB server hostname to connect to + --smb-idle-timeout Duration Max time before closing idle connections (default 1m0s) + --smb-pass string SMB password (obscured) + --smb-port int SMB port number (default 445) + --smb-user string SMB username (default "$USER") + --storj-access-grant string Access grant + --storj-api-key string API key + --storj-passphrase string Encryption passphrase + --storj-provider string Choose an authentication method (default "existing") + --storj-satellite-address string Satellite address (default "us-central-1.storj.io") + --sugarsync-access-key-id string Sugarsync Access Key ID + --sugarsync-app-id string Sugarsync App ID + --sugarsync-authorization string Sugarsync authorization + --sugarsync-authorization-expiry string Sugarsync authorization expiry + --sugarsync-deleted-id string Sugarsync deleted folder id + --sugarsync-encoding MultiEncoder The encoding for the backend (default Slash,Ctl,InvalidUtf8,Dot) + --sugarsync-hard-delete Permanently delete files if true + --sugarsync-private-access-key string Sugarsync Private Access Key + --sugarsync-refresh-token string Sugarsync refresh token + --sugarsync-root-id string Sugarsync root id + --sugarsync-user string Sugarsync user + --swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID) + --swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME) + --swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET) + --swift-auth string Authentication URL for server (OS_AUTH_URL) + --swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN) + --swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION) + --swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments 
container (default 5Gi) + --swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME) + --swift-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8) + --swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public") + --swift-env-auth Get swift credentials from environment variables in standard OpenStack form + --swift-key string API key or password (OS_PASSWORD) + --swift-leave-parts-on-error If true avoid calling abort upload on a failure + --swift-no-chunk Don't chunk files during streaming upload + --swift-no-large-objects Disable support for static and dynamic large objects + --swift-region string Region name - optional (OS_REGION_NAME) + --swift-storage-policy string The storage policy to use when creating a new container + --swift-storage-url string Storage URL - optional (OS_STORAGE_URL) + --swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME) + --swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME) + --swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID) + --swift-user string User name to log in (OS_USERNAME) + --swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID) + --union-action-policy string Policy to choose upstream on ACTION category (default "epall") + --union-cache-time int Cache time of usage and free space (in seconds) (default 120) + --union-create-policy string Policy to choose upstream on CREATE category (default "epmfs") + --union-min-free-space SizeSuffix Minimum viable free space for lfs/eplfs policies (default 1Gi) + --union-search-policy string Policy to choose upstream on SEARCH category (default "ff") + --union-upstreams string List of space separated upstreams + --uptobox-access-token string Your access token + 
--uptobox-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot) + --webdav-bearer-token string Bearer token instead of user/pass (e.g. a Macaroon) + --webdav-bearer-token-command string Command to run to get a bearer token + --webdav-encoding string The encoding for the backend + --webdav-headers CommaSepList Set HTTP headers for all transactions + --webdav-pass string Password (obscured) + --webdav-url string URL of http host to connect to + --webdav-user string User name + --webdav-vendor string Name of the WebDAV site/service/software you are using + --yandex-auth-url string Auth server URL + --yandex-client-id string OAuth Client Id + --yandex-client-secret string OAuth Client Secret + --yandex-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot) + --yandex-hard-delete Delete files permanently rather than putting them into the trash + --yandex-token string OAuth Access Token as a JSON blob + --yandex-token-url string Token server url + --zoho-auth-url string Auth server URL + --zoho-client-id string OAuth Client Id + --zoho-client-secret string OAuth Client Secret + --zoho-encoding MultiEncoder The encoding for the backend (default Del,Ctl,InvalidUtf8) + --zoho-region string Zoho region to connect to + --zoho-token string OAuth Access Token as a JSON blob + --zoho-token-url string Token server url ``` diff --git a/docs/content/ftp.md b/docs/content/ftp.md index 563de4132..ca919fd37 100644 --- a/docs/content/ftp.md +++ b/docs/content/ftp.md @@ -217,7 +217,7 @@ Use Implicit FTPS (FTP over TLS). When using implicit FTP over TLS the client connects using TLS right from the start which breaks compatibility with non-TLS-aware servers. This is usually served over port 990 rather -than port 21. Cannot be used in combination with explicit FTP. +than port 21. Cannot be used in combination with explicit FTPS. 
Properties: @@ -232,7 +232,7 @@ Use Explicit FTPS (FTP over TLS). When using explicit FTP over TLS the client explicitly requests security from the server in order to upgrade a plain text connection -to an encrypted one. Cannot be used in combination with implicit FTP. +to an encrypted one. Cannot be used in combination with implicit FTPS. Properties: diff --git a/docs/content/local.md b/docs/content/local.md index 70fbf9315..15c577e83 100644 --- a/docs/content/local.md +++ b/docs/content/local.md @@ -429,7 +429,7 @@ Properties: Don't check to see if the files change during upload. Normally rclone checks the size and modification time of files as they -are being uploaded and aborts with a message which starts "can't copy - +are being uploaded and aborts with a message which starts "can't copy - source file is being updated" if the file changes during upload. However on some file systems this modification time check may fail (e.g. diff --git a/docs/content/mailru.md b/docs/content/mailru.md index 758cb4b9e..0f2f106c2 100644 --- a/docs/content/mailru.md +++ b/docs/content/mailru.md @@ -191,6 +191,11 @@ Properties: Password. +This must be an app password - rclone will not work with your normal +password. See the Configuration section in the docs for how to make an +app password. + + **NB** Input to this must be obscured - see [rclone obscure](/commands/rclone_obscure/). Properties: diff --git a/docs/content/rc.md b/docs/content/rc.md index 3143b956d..1cb4dd55b 100644 --- a/docs/content/rc.md +++ b/docs/content/rc.md @@ -616,6 +616,14 @@ See the [config providers](/commands/rclone_config_providers/) command for more **Authentication is required for this call.** +### config/setpath: Set the path of the config file {#config-setpath} + +Parameters: + +- path - path to the config file to use + +**Authentication is required for this call.** + ### config/update: update the config for a remote. 
{#config-update} This takes the following parameters: @@ -714,7 +722,7 @@ Returns: "result": "" } -OR +OR { "error": true, "result": "" @@ -909,6 +917,22 @@ Parameters: - rate - int +### debug/set-gc-percent: Call runtime/debug.SetGCPercent for setting the garbage collection target percentage. {#debug-set-gc-percent} + +SetGCPercent sets the garbage collection target percentage: a collection is triggered +when the ratio of freshly allocated data to live data remaining after the previous collection +reaches this percentage. SetGCPercent returns the previous setting. The initial setting is the +value of the GOGC environment variable at startup, or 100 if the variable is not set. + +This setting may be effectively reduced in order to maintain a memory limit. +A negative percentage effectively disables garbage collection, unless the memory limit is reached. + +See https://pkg.go.dev/runtime/debug#SetMemoryLimit for more details. + +Parameters: + +- gc-percent - int + ### debug/set-mutex-profile-fraction: Set runtime.SetMutexProfileFraction for mutex profiling. {#debug-set-mutex-profile-fraction} SetMutexProfileFraction controls the fraction of mutex contention @@ -930,6 +954,38 @@ Results: - previousRate - int +### debug/set-soft-memory-limit: Call runtime/debug.SetMemoryLimit for setting a soft memory limit for the runtime. {#debug-set-soft-memory-limit} + +SetMemoryLimit provides the runtime with a soft memory limit. + +The runtime undertakes several processes to try to respect this memory limit, including +adjustments to the frequency of garbage collections and returning memory to the underlying +system more aggressively. This limit will be respected even if GOGC=off (or, if SetGCPercent(-1) is executed). + +The input limit is provided as bytes, and includes all memory mapped, managed, and not +released by the Go runtime. 
Notably, it does not account for space used by the Go binary +and memory external to Go, such as memory managed by the underlying system on behalf of +the process, or memory managed by non-Go code inside the same process. +Examples of excluded memory sources include: OS kernel memory held on behalf of the process, +memory allocated by C code, and memory mapped by syscall.Mmap (because it is not managed by the Go runtime). + +A zero limit or a limit that's lower than the amount of memory used by the Go runtime may cause +the garbage collector to run nearly continuously. However, the application may still make progress. + +The memory limit is always respected by the Go runtime, so to effectively disable this behavior, +set the limit very high. math.MaxInt64 is the canonical value for disabling the limit, but values +much greater than the available memory on the underlying system work just as well. + +See https://go.dev/doc/gc-guide for a detailed guide explaining the soft memory limit in more detail, +as well as a variety of common use-cases and scenarios. + +SetMemoryLimit returns the previously set memory limit. A negative input does not adjust the limit, +and allows for retrieval of the currently set memory limit. + +Parameters: + +- mem-limit - int + ### fscache/clear: Clear the Fs cache. {#fscache-clear} This clears the fs cache. 
This is where remotes created from backends diff --git a/docs/content/s3.md b/docs/content/s3.md index 34e34230f..172365e24 100644 --- a/docs/content/s3.md +++ b/docs/content/s3.md @@ -643,7 +643,7 @@ A simple solution is to set the `--s3-upload-cutoff 0` and force all the files t {{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/s3/s3.go then run make backenddocs" >}} ### Standard options -Here are the Standard options specific to s3 (Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi). +Here are the Standard options specific to s3 (Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi). #### --s3-provider @@ -682,6 +682,8 @@ Properties: - IONOS Cloud - "LyveCloud" - Seagate Lyve Cloud + - "Liara" + - Liara Object Storage - "Minio" - Minio Object Storage - "Netease" @@ -1033,7 +1035,7 @@ Properties: - Config: region - Env Var: RCLONE_S3_REGION -- Provider: !AWS,Alibaba,ChinaMobile,Cloudflare,IONOS,ArvanCloud,Qiniu,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive +- Provider: !AWS,Alibaba,ChinaMobile,Cloudflare,IONOS,ArvanCloud,Liara,Qiniu,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive - Type: string - Required: false - Examples: @@ -1312,6 +1314,22 @@ Properties: #### --s3-endpoint +Endpoint for Liara Object Storage API. 
+ +Properties: + +- Config: endpoint +- Env Var: RCLONE_S3_ENDPOINT +- Provider: Liara +- Type: string +- Required: false +- Examples: + - "storage.iran.liara.space" + - The default endpoint + - Iran + +#### --s3-endpoint + Endpoint for OSS API. Properties: @@ -1612,7 +1630,7 @@ Properties: - Config: endpoint - Env Var: RCLONE_S3_ENDPOINT -- Provider: !AWS,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,ArvanCloud,Scaleway,StackPath,Storj,RackCorp,Qiniu +- Provider: !AWS,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,Liara,ArvanCloud,Scaleway,StackPath,Storj,RackCorp,Qiniu - Type: string - Required: false - Examples: @@ -1639,15 +1657,33 @@ Properties: - "s3.ap-southeast-1.lyvecloud.seagate.com" - Seagate Lyve Cloud AP Southeast 1 (Singapore) - "s3.wasabisys.com" - - Wasabi US East endpoint + - Wasabi US East 1 (N. Virginia) + - "s3.us-east-2.wasabisys.com" + - Wasabi US East 2 (N. Virginia) + - "s3.us-central-1.wasabisys.com" + - Wasabi US Central 1 (Texas) - "s3.us-west-1.wasabisys.com" - - Wasabi US West endpoint + - Wasabi US West 1 (Oregon) + - "s3.ca-central-1.wasabisys.com" + - Wasabi CA Central 1 (Toronto) - "s3.eu-central-1.wasabisys.com" - - Wasabi EU Central endpoint + - Wasabi EU Central 1 (Amsterdam) + - "s3.eu-central-2.wasabisys.com" + - Wasabi EU Central 2 (Frankfurt) + - "s3.eu-west-1.wasabisys.com" + - Wasabi EU West 1 (London) + - "s3.eu-west-2.wasabisys.com" + - Wasabi EU West 2 (Paris) - "s3.ap-northeast-1.wasabisys.com" - Wasabi AP Northeast 1 (Tokyo) endpoint - "s3.ap-northeast-2.wasabisys.com" - Wasabi AP Northeast 2 (Osaka) endpoint + - "s3.ap-southeast-1.wasabisys.com" + - Wasabi AP Southeast 1 (Singapore) + - "s3.ap-southeast-2.wasabisys.com" + - Wasabi AP Southeast 2 (Sydney) + - "storage.iran.liara.space" + - Liara Iran endpoint - "s3.ir-thr-at1.arvanstorage.com" - ArvanCloud Tehran Iran (Asiatech) endpoint @@ -1980,7 +2016,7 @@ Properties: - Config: location_constraint - Env Var: RCLONE_S3_LOCATION_CONSTRAINT 
-- Provider: !AWS,Alibaba,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,ArvanCloud,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS +- Provider: !AWS,Alibaba,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Liara,ArvanCloud,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS - Type: string - Required: false @@ -1995,6 +2031,10 @@ For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview Note that this ACL is applied when server-side copying objects as S3 doesn't copy the ACL from the source but rather writes a fresh one. +If the acl is an empty string then no X-Amz-Acl: header is added and +the default (private) will be used. + + Properties: - Config: acl @@ -2155,6 +2195,21 @@ Properties: #### --s3-storage-class +The storage class to use when storing new objects in Liara + +Properties: + +- Config: storage_class +- Env Var: RCLONE_S3_STORAGE_CLASS +- Provider: Liara +- Type: string +- Required: false +- Examples: + - "STANDARD" + - Standard storage class + +#### --s3-storage-class + The storage class to use when storing new objects in ArvanCloud. Properties: @@ -2233,7 +2288,7 @@ Properties: ### Advanced options -Here are the Advanced options specific to s3 (Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi). +Here are the Advanced options specific to s3 (Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi). 
#### --s3-bucket-acl @@ -2244,6 +2299,10 @@ For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview Note that this ACL is applied when only when creating buckets. If it isn't set then "acl" is used instead. +If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl: +header is added and the default (private) will be used. + + Properties: - Config: bucket_acl @@ -2866,6 +2925,37 @@ Properties: - Type: bool - Default: false +#### --s3-might-gzip + +Set this if the backend might gzip objects. + +Normally providers will not alter objects when they are downloaded. If +an object was not uploaded with `Content-Encoding: gzip` then it won't +be set on download. + +However some providers may gzip objects even if they weren't uploaded +with `Content-Encoding: gzip` (eg Cloudflare). + +A symptom of this would be receiving errors like + + ERROR corrupted on transfer: sizes differ NNN vs MMM + +If you set this flag and rclone downloads an object with +Content-Encoding: gzip set and chunked transfer encoding, then rclone +will decompress the object on the fly. + +If this is set to unset (the default) then rclone will choose +according to the provider setting what to apply, but you can override +rclone's choice here. + + +Properties: + +- Config: might_gzip +- Env Var: RCLONE_S3_MIGHT_GZIP +- Type: Tristate +- Default: unset + #### --s3-no-system-metadata Suppress setting and reading of system metadata diff --git a/docs/content/sftp.md b/docs/content/sftp.md index bbbbcf763..d7b23ee0f 100644 --- a/docs/content/sftp.md +++ b/docs/content/sftp.md @@ -528,6 +528,9 @@ This enables the use of the following insecure ciphers and key exchange methods: Those algorithms are insecure and may allow plaintext data to be recovered by an attacker. +This must be false if you use either ciphers or key_exchange advanced options. 
+ + Properties: - Config: use_insecure_cipher @@ -860,6 +863,64 @@ Properties: - Type: SpaceSepList - Default: +#### --sftp-ciphers + +Space separated list of ciphers to be used for session encryption, ordered by preference. + +At least one must match with server configuration. This can be checked for example using ssh -Q cipher. + +This must not be set if use_insecure_cipher is true. + +Example: + + aes128-ctr aes192-ctr aes256-ctr aes128-gcm@openssh.com aes256-gcm@openssh.com + + +Properties: + +- Config: ciphers +- Env Var: RCLONE_SFTP_CIPHERS +- Type: SpaceSepList +- Default: + +#### --sftp-key-exchange + +Space separated list of key exchange algorithms, ordered by preference. + +At least one must match with server configuration. This can be checked for example using ssh -Q kex. + +This must not be set if use_insecure_cipher is true. + +Example: + + sntrup761x25519-sha512@openssh.com curve25519-sha256 curve25519-sha256@libssh.org ecdh-sha2-nistp256 + + +Properties: + +- Config: key_exchange +- Env Var: RCLONE_SFTP_KEY_EXCHANGE +- Type: SpaceSepList +- Default: + +#### --sftp-macs + +Space separated list of MACs (message authentication code) algorithms, ordered by preference. + +At least one must match with server configuration. This can be checked for example using ssh -Q mac. 
+ +Example: + + umac-64-etm@openssh.com umac-128-etm@openssh.com hmac-sha2-256-etm@openssh.com + + +Properties: + +- Config: macs +- Env Var: RCLONE_SFTP_MACS +- Type: SpaceSepList +- Default: + {{< rem autogenerated options stop >}} ## Limitations diff --git a/go.mod b/go.mod index 873b24fb9..8cf92794b 100644 --- a/go.mod +++ b/go.mod @@ -14,6 +14,7 @@ require ( github.com/aalpar/deheap v0.0.0-20210914013432-0cc84d79dec3 github.com/abbot/go-http-auth v0.4.0 github.com/anacrolix/dms v1.5.0 + github.com/anacrolix/log v0.13.1 github.com/artyom/mtab v1.0.0 github.com/atotto/clipboard v0.1.4 github.com/aws/aws-sdk-go v1.44.145 @@ -48,6 +49,7 @@ require ( github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8 github.com/rclone/ftp v0.0.0-20221014110213-e44dedbc76c6 github.com/rfjakob/eme v1.1.2 + github.com/rivo/uniseg v0.2.0 github.com/shirou/gopsutil/v3 v3.22.10 github.com/sirupsen/logrus v1.9.0 github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 @@ -78,7 +80,6 @@ require ( cloud.google.com/go/compute/metadata v0.2.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.1 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0 // indirect - github.com/anacrolix/log v0.13.1 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/calebcase/tmpfile v1.0.3 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect @@ -116,7 +117,6 @@ require ( github.com/prometheus/client_model v0.3.0 // indirect github.com/prometheus/common v0.37.0 // indirect github.com/prometheus/procfs v0.8.0 // indirect - github.com/rivo/uniseg v0.2.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sony/gobreaker v0.5.0 // indirect github.com/spacemonkeygo/monkit/v3 v3.0.17 // indirect diff --git a/go.sum b/go.sum index 38a637db5..e1e85c49c 100644 --- a/go.sum +++ b/go.sum @@ -1033,7 +1033,6 @@ golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfS golang.org/x/net 
v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= @@ -1175,7 +1174,6 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1183,7 +1181,6 @@ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9sn golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0 h1:z85xZCsEl7bi/KwbNADeBYoOP0++7W1ipu+aGnpwzRM= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= @@ -1197,7 
+1194,6 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= diff --git a/rclone.1 b/rclone.1 index fd0219cdd..2b14cd4ff 100644 --- a/rclone.1 +++ b/rclone.1 @@ -1,7 +1,7 @@ .\"t .\" Automatically generated by Pandoc 2.9.2.1 .\" -.TH "rclone" "1" "Oct 21, 2022" "User Manual" "" +.TH "rclone" "1" "Dec 20, 2022" "User Manual" "" .hy .SH Rclone syncs your files to cloud storage .PP @@ -203,6 +203,8 @@ IONOS Cloud .IP \[bu] 2 Koofr .IP \[bu] 2 +Liara Object Storage +.IP \[bu] 2 Mail.ru Cloud .IP \[bu] 2 Memset Memstore @@ -490,13 +492,13 @@ links. If not sure, use the first link. .IP \[bu] 2 Intel/AMD - 64 -Bit (https://downloads.rclone.org/rclone-current-linux-amd64.zip) +Bit (https://downloads.rclone.org/rclone-current-windows-amd64.zip) .IP \[bu] 2 Intel/AMD - 32 -Bit (https://downloads.rclone.org/rclone-current-linux-386.zip) +Bit (https://downloads.rclone.org/rclone-current-windows-386.zip) .IP \[bu] 2 ARM - 64 -Bit (https://downloads.rclone.org/rclone-current-linux-arm64.zip) +Bit (https://downloads.rclone.org/rclone-current-windows-arm64.zip) .PP Open this file in the Explorer and extract \f[C]rclone.exe\f[R]. 
Rclone is a portable executable so you can place it wherever is @@ -3035,7 +3037,7 @@ This will look something like (some irrelevant detail removed): \[dq]State\[dq]: \[dq]*oauth-islocal,teamdrive,,\[dq], \[dq]Option\[dq]: { \[dq]Name\[dq]: \[dq]config_is_local\[dq], - \[dq]Help\[dq]: \[dq]Use auto config?\[rs]n * Say Y if not sure\[rs]n * Say N if you are working on a remote or headless machine\[rs]n\[dq], + \[dq]Help\[dq]: \[dq]Use web browser to automatically authenticate rclone with remote?\[rs]n * Say Y if the machine running rclone has a web browser you can use\[rs]n * Say N if running rclone on a (remote) machine without web browser access\[rs]nIf not sure try Y. If Y failed, try N.\[rs]n\[dq], \[dq]Default\[dq]: true, \[dq]Examples\[dq]: [ { @@ -3490,7 +3492,7 @@ This will look something like (some irrelevant detail removed): \[dq]State\[dq]: \[dq]*oauth-islocal,teamdrive,,\[dq], \[dq]Option\[dq]: { \[dq]Name\[dq]: \[dq]config_is_local\[dq], - \[dq]Help\[dq]: \[dq]Use auto config?\[rs]n * Say Y if not sure\[rs]n * Say N if you are working on a remote or headless machine\[rs]n\[dq], + \[dq]Help\[dq]: \[dq]Use web browser to automatically authenticate rclone with remote?\[rs]n * Say Y if the machine running rclone has a web browser you can use\[rs]n * Say N if running rclone on a (remote) machine without web browser access\[rs]nIf not sure try Y. 
If Y failed, try N.\[rs]n\[dq], \[dq]Default\[dq]: true, \[dq]Examples\[dq]: [ { @@ -4605,6 +4607,7 @@ rclone lsjson remote:path [flags] --hash Include hashes in the output (may take longer) --hash-type stringArray Show only this hash type (may be repeated) -h, --help help for lsjson + -M, --metadata Add metadata to the listing --no-mimetype Don\[aq]t read the mime type (can speed things up) --no-modtime Don\[aq]t read the modification time (can speed things up) --original Show the ID of the underlying Object @@ -5518,14 +5521,14 @@ rclone mount remote:path /path/to/mountpoint [flags] --allow-other Allow access to other users (not supported on Windows) --allow-root Allow access to root user (not supported on Windows) --async-read Use asynchronous reads (not supported on Windows) (default true) - --attr-timeout duration Time for which file/directory attributes are cached (default 1s) + --attr-timeout Duration Time for which file/directory attributes are cached (default 1s) --daemon Run mount in background and exit parent process (as background output is suppressed, use --log-file with --log-format=pid,... 
to monitor) (not supported on Windows) - --daemon-timeout duration Time limit for rclone to respond to kernel (not supported on Windows) - --daemon-wait duration Time to wait for ready mount from daemon (maximum time on Linux, constant sleep time on OSX/BSD) (not supported on Windows) (default 1m0s) + --daemon-timeout Duration Time limit for rclone to respond to kernel (not supported on Windows) (default 0s) + --daemon-wait Duration Time to wait for ready mount from daemon (maximum time on Linux, constant sleep time on OSX/BSD) (not supported on Windows) (default 1m0s) --debug-fuse Debug the FUSE internals - needs -v --default-permissions Makes kernel enforce access control based on the file mode (not supported on Windows) --devname string Set the device name - default is remote:path - --dir-cache-time duration Time to cache directory entries for (default 5m0s) + --dir-cache-time Duration Time to cache directory entries for (default 5m0s) --dir-perms FileMode Directory permissions (default 0777) --file-perms FileMode File permissions (default 0666) --fuse-flag stringArray Flags or arguments to be passed direct to libfuse/WinFsp (repeat if required) @@ -5539,24 +5542,24 @@ rclone mount remote:path /path/to/mountpoint [flags] --noappledouble Ignore Apple Double (._) and .DS_Store files (supported on OSX only) (default true) --noapplexattr Ignore all \[dq]com.apple.*\[dq] extended attributes (supported on OSX only) -o, --option stringArray Option for libfuse/WinFsp (repeat if required) - --poll-interval duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) + --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --read-only Only allow read-only access --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) --umask int 
Override the permission bits set by the filesystem (not supported on Windows) (default 2) - --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s) + --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off) - --vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s) + --vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s) --vfs-case-insensitive If a file name not found, find a case insensitive match --vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off) --vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached (\[aq]off\[aq] is unlimited) (default off) - --vfs-read-wait duration Time to wait for in-sequence read before seeking (default 20ms) + --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-used-is-size rclone size Use the rclone size algorithm for Used size - --vfs-write-back duration Time to writeback files after last use when using cache (default 5s) - --vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s) + --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s) + --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) --volname string Set the volume name (supported on Windows and OSX only) --write-back-cache Makes kernel buffer writes before sending them 
to rclone (without this, writethrough caching is used) (not supported on Windows) \f[R] @@ -5657,11 +5660,12 @@ The supported keys are: \[ua],\[da] or k,j to Move \[->],l to enter \[<-],h to return - c toggle counts g toggle graph + c toggle counts a toggle average size in directory + m toggle modified time u toggle human-readable format - n,s,C,A sort by name,size,count,average size + n,s,C,A,M sort by name,size,count,asize,mtime d delete file/directory v select file/directory V enter visual select mode @@ -5965,6 +5969,186 @@ It will also open the URL in the browser when rclone is run. .PP See the rc documentation (https://rclone.org/rc/) for more info on the rc flags. +.SS Server options +.PP +Use \f[C]--addr\f[R] to specify which IP address and port the server +should listen on, eg \f[C]--addr 1.2.3.4:8000\f[R] or +\f[C]--addr :8080\f[R] to listen to all IPs. +By default it only listens on localhost. +You can use port :0 to let the OS choose an available port. +.PP +If you set \f[C]--addr\f[R] to listen on a public or LAN accessible IP +address then using Authentication is advised - see the next section for +info. +.PP +You can use a unix socket by setting the url to +\f[C]unix:///path/to/socket\f[R] or just by using an absolute path name. +Note that unix sockets bypass the authentication - this is expected to +be done with file system permissions. +.PP +\f[C]--addr\f[R] may be repeated to listen on multiple +IPs/ports/sockets. +.PP +\f[C]--server-read-timeout\f[R] and \f[C]--server-write-timeout\f[R] can +be used to control the timeouts on the server. +Note that this is the total time for a transfer. +.PP +\f[C]--max-header-bytes\f[R] controls the maximum number of bytes the +server will accept in the HTTP header. +.PP +\f[C]--baseurl\f[R] controls the URL prefix that rclone serves from. +By default rclone will serve from the root. +If you used \f[C]--baseurl \[dq]/rclone\[dq]\f[R] then rclone would +serve from a URL starting with \[dq]/rclone/\[dq]. 
+This is useful if you wish to proxy rclone serve. +Rclone automatically inserts leading and trailing \[dq]/\[dq] on +\f[C]--baseurl\f[R], so \f[C]--baseurl \[dq]rclone\[dq]\f[R], +\f[C]--baseurl \[dq]/rclone\[dq]\f[R] and +\f[C]--baseurl \[dq]/rclone/\[dq]\f[R] are all treated identically. +.SS TLS (SSL) +.PP +By default this will serve over http. +If you want you can serve over https. +You will need to supply the \f[C]--cert\f[R] and \f[C]--key\f[R] flags. +If you wish to do client side certificate validation then you will need +to supply \f[C]--client-ca\f[R] also. +.PP +\f[C]--cert\f[R] should be a either a PEM encoded certificate or a +concatenation of that with the CA certificate. +\f[C]--key\f[R] should be the PEM encoded private key and +\f[C]--client-ca\f[R] should be the PEM encoded client certificate +authority certificate. +.PP +--min-tls-version is minimum TLS version that is acceptable. +Valid values are \[dq]tls1.0\[dq], \[dq]tls1.1\[dq], \[dq]tls1.2\[dq] +and \[dq]tls1.3\[dq] (default \[dq]tls1.0\[dq]). +.SS Template +.PP +\f[C]--template\f[R] allows a user to specify a custom markup template +for HTTP and WebDAV serve functions. +The server exports the following markup to be used within the template +to server pages: +.PP +.TS +tab(@); +lw(35.0n) lw(35.0n). +T{ +Parameter +T}@T{ +Description +T} +_ +T{ +\&.Name +T}@T{ +The full path of a file/directory. +T} +T{ +\&.Title +T}@T{ +Directory listing of .Name +T} +T{ +\&.Sort +T}@T{ +The current sort used. +This is changeable via ?sort= parameter +T} +T{ +T}@T{ +Sort Options: namedirfirst,name,size,time (default namedirfirst) +T} +T{ +\&.Order +T}@T{ +The current ordering used. +This is changeable via ?order= parameter +T} +T{ +T}@T{ +Order Options: asc,desc (default asc) +T} +T{ +\&.Query +T}@T{ +Currently unused. +T} +T{ +\&.Breadcrumb +T}@T{ +Allows for creating a relative navigation +T} +T{ +-- .Link +T}@T{ +The relative to the root link of the Text. 
+T} +T{ +-- .Text +T}@T{ +The Name of the directory. +T} +T{ +\&.Entries +T}@T{ +Information about a specific file/directory. +T} +T{ +-- .URL +T}@T{ +The \[aq]url\[aq] of an entry. +T} +T{ +-- .Leaf +T}@T{ +Currently same as \[aq]URL\[aq] but intended to be \[aq]just\[aq] the +name. +T} +T{ +-- .IsDir +T}@T{ +Boolean for if an entry is a directory or not. +T} +T{ +-- .Size +T}@T{ +Size in Bytes of the entry. +T} +T{ +-- .ModTime +T}@T{ +The UTC timestamp of an entry. +T} +.TE +.SS Authentication +.PP +By default this will serve files without needing a login. +.PP +You can either use an htpasswd file which can take lots of users, or set +a single username and password with the \f[C]--user\f[R] and +\f[C]--pass\f[R] flags. +.PP +Use \f[C]--htpasswd /path/to/htpasswd\f[R] to provide an htpasswd file. +This is in standard apache format and supports MD5, SHA1 and BCrypt for +basic authentication. +Bcrypt is recommended. +.PP +To create an htpasswd file: +.IP +.nf +\f[C] +touch htpasswd +htpasswd -B htpasswd user +htpasswd -B htpasswd anotherUser +\f[R] +.fi +.PP +The password file can be updated while rclone is running. +.PP +Use \f[C]--realm\f[R] to set the authentication realm. +.PP +Use \f[C]--salt\f[R] to change the password hashing salt from the +default. 
.IP .nf \f[C] @@ -6610,8 +6794,8 @@ rclone serve dlna remote:path [flags] .nf \f[C] --addr string The ip:port or :port to bind the DLNA http server to (default \[dq]:7879\[dq]) - --announce-interval duration The interval between SSDP announcements (default 12m0s) - --dir-cache-time duration Time to cache directory entries for (default 5m0s) + --announce-interval Duration The interval between SSDP announcements (default 12m0s) + --dir-cache-time Duration Time to cache directory entries for (default 5m0s) --dir-perms FileMode Directory permissions (default 0777) --file-perms FileMode File permissions (default 0666) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) @@ -6622,24 +6806,24 @@ rclone serve dlna remote:path [flags] --no-checksum Don\[aq]t compare checksums on up/download --no-modtime Don\[aq]t read/write the modification time (can speed things up) --no-seek Don\[aq]t allow seeking in files - --poll-interval duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) + --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --read-only Only allow read-only access --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) - --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s) + --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off) - --vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s) + --vfs-cache-poll-interval Duration 
Interval to poll the cache for stale objects (default 1m0s) --vfs-case-insensitive If a file name not found, find a case insensitive match --vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off) --vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached (\[aq]off\[aq] is unlimited) (default off) - --vfs-read-wait duration Time to wait for in-sequence read before seeking (default 20ms) + --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-used-is-size rclone size Use the rclone size algorithm for Used size - --vfs-write-back duration Time to writeback files after last use when using cache (default 5s) - --vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s) + --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s) + --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) \f[R] .fi .PP @@ -7109,15 +7293,15 @@ rclone serve docker [flags] --allow-other Allow access to other users (not supported on Windows) --allow-root Allow access to root user (not supported on Windows) --async-read Use asynchronous reads (not supported on Windows) (default true) - --attr-timeout duration Time for which file/directory attributes are cached (default 1s) + --attr-timeout Duration Time for which file/directory attributes are cached (default 1s) --base-dir string Base directory for volumes (default \[dq]/var/lib/docker-volumes/rclone\[dq]) --daemon Run mount in background and exit parent process (as background output is suppressed, use --log-file with 
--log-format=pid,... to monitor) (not supported on Windows) - --daemon-timeout duration Time limit for rclone to respond to kernel (not supported on Windows) - --daemon-wait duration Time to wait for ready mount from daemon (maximum time on Linux, constant sleep time on OSX/BSD) (not supported on Windows) (default 1m0s) + --daemon-timeout Duration Time limit for rclone to respond to kernel (not supported on Windows) (default 0s) + --daemon-wait Duration Time to wait for ready mount from daemon (maximum time on Linux, constant sleep time on OSX/BSD) (not supported on Windows) (default 1m0s) --debug-fuse Debug the FUSE internals - needs -v --default-permissions Makes kernel enforce access control based on the file mode (not supported on Windows) --devname string Set the device name - default is remote:path - --dir-cache-time duration Time to cache directory entries for (default 5m0s) + --dir-cache-time Duration Time to cache directory entries for (default 5m0s) --dir-perms FileMode Directory permissions (default 0777) --file-perms FileMode File permissions (default 0666) --forget-state Skip restoring previous state @@ -7133,26 +7317,26 @@ rclone serve docker [flags] --noappledouble Ignore Apple Double (._) and .DS_Store files (supported on OSX only) (default true) --noapplexattr Ignore all \[dq]com.apple.*\[dq] extended attributes (supported on OSX only) -o, --option stringArray Option for libfuse/WinFsp (repeat if required) - --poll-interval duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) + --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --read-only Only allow read-only access --socket-addr string Address or absolute path (default: /run/docker/plugins/rclone.sock) --socket-gid int GID for unix socket (default: current process GID) (default 1000) 
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) - --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s) + --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off) - --vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s) + --vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s) --vfs-case-insensitive If a file name not found, find a case insensitive match --vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off) --vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached (\[aq]off\[aq] is unlimited) (default off) - --vfs-read-wait duration Time to wait for in-sequence read before seeking (default 20ms) + --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-used-is-size rclone size Use the rclone size algorithm for Used size - --vfs-write-back duration Time to writeback files after last use when using cache (default 5s) - --vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s) + --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s) + --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) --volname string Set the 
volume name (supported on Windows and OSX only) --write-back-cache Makes kernel buffer writes before sending them to rclone (without this, writethrough caching is used) (not supported on Windows) \f[R] @@ -7686,7 +7870,7 @@ rclone serve ftp remote:path [flags] --addr string IPaddress:Port or :Port to bind server to (default \[dq]localhost:2121\[dq]) --auth-proxy string A program to use to create the backend from the auth --cert string TLS PEM key (concatenation of certificate and CA certificate) - --dir-cache-time duration Time to cache directory entries for (default 5m0s) + --dir-cache-time Duration Time to cache directory entries for (default 5m0s) --dir-perms FileMode Directory permissions (default 0777) --file-perms FileMode File permissions (default 0666) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) @@ -7697,26 +7881,26 @@ rclone serve ftp remote:path [flags] --no-seek Don\[aq]t allow seeking in files --pass string Password for authentication (empty value allow every password) --passive-port string Passive port range to use (default \[dq]30000-32000\[dq]) - --poll-interval duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) + --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --public-ip string Public IP address to advertise for passive connections --read-only Only allow read-only access --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) --user string User name for authentication (default \[dq]anonymous\[dq]) - --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s) + --vfs-cache-max-age Duration Max age of objects in the 
cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off) - --vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s) + --vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s) --vfs-case-insensitive If a file name not found, find a case insensitive match --vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off) --vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached (\[aq]off\[aq] is unlimited) (default off) - --vfs-read-wait duration Time to wait for in-sequence read before seeking (default 20ms) + --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-used-is-size rclone size Use the rclone size algorithm for Used size - --vfs-write-back duration Time to writeback files after last use when using cache (default 5s) - --vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s) + --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s) + --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) \f[R] .fi .PP @@ -7755,6 +7939,14 @@ If you set \f[C]--addr\f[R] to listen on a public or LAN accessible IP address then using Authentication is advised - see the next section for info. .PP +You can use a unix socket by setting the url to +\f[C]unix:///path/to/socket\f[R] or just by using an absolute path name. 
+Note that unix sockets bypass the authentication - this is expected to +be done with file system permissions. +.PP +\f[C]--addr\f[R] may be repeated to listen on multiple +IPs/ports/sockets. +.PP \f[C]--server-read-timeout\f[R] and \f[C]--server-write-timeout\f[R] can be used to control the timeouts on the server. Note that this is the total time for a transfer. @@ -7771,7 +7963,7 @@ Rclone automatically inserts leading and trailing \[dq]/\[dq] on \f[C]--baseurl\f[R], so \f[C]--baseurl \[dq]rclone\[dq]\f[R], \f[C]--baseurl \[dq]/rclone\[dq]\f[R] and \f[C]--baseurl \[dq]/rclone/\[dq]\f[R] are all treated identically. -.SS SSL/TLS +.SS TLS (SSL) .PP By default this will serve over http. If you want you can serve over https. @@ -8317,47 +8509,47 @@ rclone serve http remote:path [flags] .IP .nf \f[C] - --addr string IPaddress:Port or :Port to bind server to (default \[dq]127.0.0.1:8080\[dq]) + --addr stringArray IPaddress:Port or :Port to bind server to (default [127.0.0.1:8080]) --baseurl string Prefix for URLs - leave blank for root - --cert string SSL PEM key (concatenation of certificate and CA certificate) + --cert string TLS PEM key (concatenation of certificate and CA certificate) --client-ca string Client certificate authority to verify clients with - --dir-cache-time duration Time to cache directory entries for (default 5m0s) + --dir-cache-time Duration Time to cache directory entries for (default 5m0s) --dir-perms FileMode Directory permissions (default 0777) --file-perms FileMode File permissions (default 0666) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) -h, --help help for http --htpasswd string A htpasswd file - if not provided no authentication is done - --key string SSL PEM Private key + --key string TLS PEM Private key --max-header-bytes int Maximum size of request header (default 4096) --min-tls-version string Minimum TLS version that is acceptable (default \[dq]tls1.0\[dq]) --no-checksum 
Don\[aq]t compare checksums on up/download --no-modtime Don\[aq]t read/write the modification time (can speed things up) --no-seek Don\[aq]t allow seeking in files --pass string Password for authentication - --poll-interval duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) + --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --read-only Only allow read-only access --realm string Realm for authentication --salt string Password hashing salt (default \[dq]dlPL2MqE\[dq]) - --server-read-timeout duration Timeout for server reading data (default 1h0m0s) - --server-write-timeout duration Timeout for server writing data (default 1h0m0s) + --server-read-timeout Duration Timeout for server reading data (default 1h0m0s) + --server-write-timeout Duration Timeout for server writing data (default 1h0m0s) --template string User-specified template --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) --user string User name for authentication - --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s) + --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off) - --vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s) + --vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s) --vfs-case-insensitive If a file name not found, find a case insensitive match --vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off) 
--vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached (\[aq]off\[aq] is unlimited) (default off) - --vfs-read-wait duration Time to wait for in-sequence read before seeking (default 20ms) + --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-used-is-size rclone size Use the rclone size algorithm for Used size - --vfs-write-back duration Time to writeback files after last use when using cache (default 5s) - --vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s) + --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s) + --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) \f[R] .fi .PP @@ -8372,7 +8564,7 @@ remote over a protocol. Serve the remote for restic\[aq]s REST API. .SS Synopsis .PP -Run a basic web server to serve a remove over restic\[aq]s REST backend +Run a basic web server to serve a remote over restic\[aq]s REST backend API over HTTP. This allows restic to use rclone as a data storage mechanism for cloud providers that restic does not support directly. @@ -8469,9 +8661,8 @@ repositories starting with a path of \f[C]//\f[R]. .SS Server options .PP Use \f[C]--addr\f[R] to specify which IP address and port the server -should listen on, e.g. -\f[C]--addr 1.2.3.4:8000\f[R] or \f[C]--addr :8080\f[R] to listen to all -IPs. +should listen on, eg \f[C]--addr 1.2.3.4:8000\f[R] or +\f[C]--addr :8080\f[R] to listen to all IPs. By default it only listens on localhost. You can use port :0 to let the OS choose an available port. 
.PP @@ -8479,6 +8670,14 @@ If you set \f[C]--addr\f[R] to listen on a public or LAN accessible IP address then using Authentication is advised - see the next section for info. .PP +You can use a unix socket by setting the url to +\f[C]unix:///path/to/socket\f[R] or just by using an absolute path name. +Note that unix sockets bypass the authentication - this is expected to +be done with file system permissions. +.PP +\f[C]--addr\f[R] may be repeated to listen on multiple +IPs/ports/sockets. +.PP \f[C]--server-read-timeout\f[R] and \f[C]--server-write-timeout\f[R] can be used to control the timeouts on the server. Note that this is the total time for a transfer. @@ -8495,103 +8694,23 @@ Rclone automatically inserts leading and trailing \[dq]/\[dq] on \f[C]--baseurl\f[R], so \f[C]--baseurl \[dq]rclone\[dq]\f[R], \f[C]--baseurl \[dq]/rclone\[dq]\f[R] and \f[C]--baseurl \[dq]/rclone/\[dq]\f[R] are all treated identically. +.SS TLS (SSL) .PP -\f[C]--template\f[R] allows a user to specify a custom markup template -for HTTP and WebDAV serve functions. -The server exports the following markup to be used within the template -to server pages: +By default this will serve over http. +If you want you can serve over https. +You will need to supply the \f[C]--cert\f[R] and \f[C]--key\f[R] flags. +If you wish to do client side certificate validation then you will need +to supply \f[C]--client-ca\f[R] also. .PP -.TS -tab(@); -lw(35.0n) lw(35.0n). -T{ -Parameter -T}@T{ -Description -T} -_ -T{ -\&.Name -T}@T{ -The full path of a file/directory. -T} -T{ -\&.Title -T}@T{ -Directory listing of .Name -T} -T{ -\&.Sort -T}@T{ -The current sort used. -This is changeable via ?sort= parameter -T} -T{ -T}@T{ -Sort Options: namedirfirst,name,size,time (default namedirfirst) -T} -T{ -\&.Order -T}@T{ -The current ordering used. -This is changeable via ?order= parameter -T} -T{ -T}@T{ -Order Options: asc,desc (default asc) -T} -T{ -\&.Query -T}@T{ -Currently unused. 
-T} -T{ -\&.Breadcrumb -T}@T{ -Allows for creating a relative navigation -T} -T{ --- .Link -T}@T{ -The relative to the root link of the Text. -T} -T{ --- .Text -T}@T{ -The Name of the directory. -T} -T{ -\&.Entries -T}@T{ -Information about a specific file/directory. -T} -T{ --- .URL -T}@T{ -The \[aq]url\[aq] of an entry. -T} -T{ --- .Leaf -T}@T{ -Currently same as \[aq]URL\[aq] but intended to be \[aq]just\[aq] the -name. -T} -T{ --- .IsDir -T}@T{ -Boolean for if an entry is a directory or not. -T} -T{ --- .Size -T}@T{ -Size in Bytes of the entry. -T} -T{ --- .ModTime -T}@T{ -The UTC timestamp of an entry. -T} -.TE +\f[C]--cert\f[R] should be a either a PEM encoded certificate or a +concatenation of that with the CA certificate. +\f[C]--key\f[R] should be the PEM encoded private key and +\f[C]--client-ca\f[R] should be the PEM encoded client certificate +authority certificate. +.PP +--min-tls-version is minimum TLS version that is acceptable. +Valid values are \[dq]tls1.0\[dq], \[dq]tls1.1\[dq], \[dq]tls1.2\[dq] +and \[dq]tls1.3\[dq] (default \[dq]tls1.0\[dq]). .SS Authentication .PP By default this will serve files without needing a login. @@ -8618,23 +8737,9 @@ htpasswd -B htpasswd anotherUser The password file can be updated while rclone is running. .PP Use \f[C]--realm\f[R] to set the authentication realm. -.SS SSL/TLS .PP -By default this will serve over HTTP. -If you want you can serve over HTTPS. -You will need to supply the \f[C]--cert\f[R] and \f[C]--key\f[R] flags. -If you wish to do client side certificate validation then you will need -to supply \f[C]--client-ca\f[R] also. -.PP -\f[C]--cert\f[R] should be either a PEM encoded certificate or a -concatenation of that with the CA certificate. -\f[C]--key\f[R] should be the PEM encoded private key and -\f[C]--client-ca\f[R] should be the PEM encoded client certificate -authority certificate. -.PP ---min-tls-version is minimum TLS version that is acceptable. 
-Valid values are \[dq]tls1.0\[dq], \[dq]tls1.1\[dq], \[dq]tls1.2\[dq] -and \[dq]tls1.3\[dq] (default \[dq]tls1.0\[dq]). +Use \f[C]--salt\f[R] to change the password hashing salt from the +default. .IP .nf \f[C] @@ -8645,24 +8750,24 @@ rclone serve restic remote:path [flags] .IP .nf \f[C] - --addr string IPaddress:Port or :Port to bind server to (default \[dq]localhost:8080\[dq]) + --addr stringArray IPaddress:Port or :Port to bind server to (default [127.0.0.1:8080]) --append-only Disallow deletion of repository data --baseurl string Prefix for URLs - leave blank for root --cache-objects Cache listed objects (default true) - --cert string SSL PEM key (concatenation of certificate and CA certificate) + --cert string TLS PEM key (concatenation of certificate and CA certificate) --client-ca string Client certificate authority to verify clients with -h, --help help for restic - --htpasswd string htpasswd file - if not provided no authentication is done - --key string SSL PEM Private key + --htpasswd string A htpasswd file - if not provided no authentication is done + --key string TLS PEM Private key --max-header-bytes int Maximum size of request header (default 4096) --min-tls-version string Minimum TLS version that is acceptable (default \[dq]tls1.0\[dq]) --pass string Password for authentication --private-repos Users can only access their private repo - --realm string Realm for authentication (default \[dq]rclone\[dq]) - --server-read-timeout duration Timeout for server reading data (default 1h0m0s) - --server-write-timeout duration Timeout for server writing data (default 1h0m0s) + --realm string Realm for authentication + --salt string Password hashing salt (default \[dq]dlPL2MqE\[dq]) + --server-read-timeout Duration Timeout for server reading data (default 1h0m0s) + --server-write-timeout Duration Timeout for server writing data (default 1h0m0s) --stdio Run an HTTP2 server on stdin/stdout - --template string User-specified template --user string User name for 
authentication \f[R] .fi @@ -9237,7 +9342,7 @@ rclone serve sftp remote:path [flags] --addr string IPaddress:Port or :Port to bind server to (default \[dq]localhost:2022\[dq]) --auth-proxy string A program to use to create the backend from the auth --authorized-keys string Authorized keys file (default \[dq]\[ti]/.ssh/authorized_keys\[dq]) - --dir-cache-time duration Time to cache directory entries for (default 5m0s) + --dir-cache-time Duration Time to cache directory entries for (default 5m0s) --dir-perms FileMode Directory permissions (default 0777) --file-perms FileMode File permissions (default 0666) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) @@ -9248,26 +9353,26 @@ rclone serve sftp remote:path [flags] --no-modtime Don\[aq]t read/write the modification time (can speed things up) --no-seek Don\[aq]t allow seeking in files --pass string Password for authentication - --poll-interval duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) + --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --read-only Only allow read-only access --stdio Run an sftp server on stdin/stdout --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) --user string User name for authentication - --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s) + --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off) - --vfs-cache-poll-interval duration Interval to poll the cache for stale 
objects (default 1m0s) + --vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s) --vfs-case-insensitive If a file name not found, find a case insensitive match --vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off) --vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached (\[aq]off\[aq] is unlimited) (default off) - --vfs-read-wait duration Time to wait for in-sequence read before seeking (default 20ms) + --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-used-is-size rclone size Use the rclone size algorithm for Used size - --vfs-write-back duration Time to writeback files after last use when using cache (default 5s) - --vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s) + --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s) + --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) \f[R] .fi .PP @@ -9301,9 +9406,8 @@ see the full list. .SS Server options .PP Use \f[C]--addr\f[R] to specify which IP address and port the server -should listen on, e.g. -\f[C]--addr 1.2.3.4:8000\f[R] or \f[C]--addr :8080\f[R] to listen to all -IPs. +should listen on, eg \f[C]--addr 1.2.3.4:8000\f[R] or +\f[C]--addr :8080\f[R] to listen to all IPs. By default it only listens on localhost. You can use port :0 to let the OS choose an available port. 
.PP @@ -9311,6 +9415,14 @@ If you set \f[C]--addr\f[R] to listen on a public or LAN accessible IP address then using Authentication is advised - see the next section for info. .PP +You can use a unix socket by setting the url to +\f[C]unix:///path/to/socket\f[R] or just by using an absolute path name. +Note that unix sockets bypass the authentication - this is expected to +be done with file system permissions. +.PP +\f[C]--addr\f[R] may be repeated to listen on multiple +IPs/ports/sockets. +.PP \f[C]--server-read-timeout\f[R] and \f[C]--server-write-timeout\f[R] can be used to control the timeouts on the server. Note that this is the total time for a transfer. @@ -9327,6 +9439,24 @@ Rclone automatically inserts leading and trailing \[dq]/\[dq] on \f[C]--baseurl\f[R], so \f[C]--baseurl \[dq]rclone\[dq]\f[R], \f[C]--baseurl \[dq]/rclone\[dq]\f[R] and \f[C]--baseurl \[dq]/rclone/\[dq]\f[R] are all treated identically. +.SS TLS (SSL) +.PP +By default this will serve over http. +If you want you can serve over https. +You will need to supply the \f[C]--cert\f[R] and \f[C]--key\f[R] flags. +If you wish to do client side certificate validation then you will need +to supply \f[C]--client-ca\f[R] also. +.PP +\f[C]--cert\f[R] should be a either a PEM encoded certificate or a +concatenation of that with the CA certificate. +\f[C]--key\f[R] should be the PEM encoded private key and +\f[C]--client-ca\f[R] should be the PEM encoded client certificate +authority certificate. +.PP +--min-tls-version is minimum TLS version that is acceptable. +Valid values are \[dq]tls1.0\[dq], \[dq]tls1.1\[dq], \[dq]tls1.2\[dq] +and \[dq]tls1.3\[dq] (default \[dq]tls1.0\[dq]). +.SS Template .PP \f[C]--template\f[R] allows a user to specify a custom markup template for HTTP and WebDAV serve functions. @@ -9450,23 +9580,9 @@ htpasswd -B htpasswd anotherUser The password file can be updated while rclone is running. .PP Use \f[C]--realm\f[R] to set the authentication realm. 
-.SS SSL/TLS .PP -By default this will serve over HTTP. -If you want you can serve over HTTPS. -You will need to supply the \f[C]--cert\f[R] and \f[C]--key\f[R] flags. -If you wish to do client side certificate validation then you will need -to supply \f[C]--client-ca\f[R] also. -.PP -\f[C]--cert\f[R] should be either a PEM encoded certificate or a -concatenation of that with the CA certificate. -\f[C]--key\f[R] should be the PEM encoded private key and -\f[C]--client-ca\f[R] should be the PEM encoded client certificate -authority certificate. -.PP ---min-tls-version is minimum TLS version that is acceptable. -Valid values are \[dq]tls1.0\[dq], \[dq]tls1.1\[dq], \[dq]tls1.2\[dq] -and \[dq]tls1.3\[dq] (default \[dq]tls1.0\[dq]). +Use \f[C]--salt\f[R] to change the password hashing salt from the +default. .SS VFS - Virtual File System .PP This command uses the VFS layer. @@ -9962,49 +10078,50 @@ rclone serve webdav remote:path [flags] .IP .nf \f[C] - --addr string IPaddress:Port or :Port to bind server to (default \[dq]localhost:8080\[dq]) + --addr stringArray IPaddress:Port or :Port to bind server to (default [127.0.0.1:8080]) --auth-proxy string A program to use to create the backend from the auth --baseurl string Prefix for URLs - leave blank for root - --cert string SSL PEM key (concatenation of certificate and CA certificate) + --cert string TLS PEM key (concatenation of certificate and CA certificate) --client-ca string Client certificate authority to verify clients with - --dir-cache-time duration Time to cache directory entries for (default 5m0s) + --dir-cache-time Duration Time to cache directory entries for (default 5m0s) --dir-perms FileMode Directory permissions (default 0777) --disable-dir-list Disable HTML directory list on GET request for a directory --etag-hash string Which hash to use for the ETag, or auto or blank for off --file-perms FileMode File permissions (default 0666) --gid uint32 Override the gid field set by the filesystem (not supported on 
Windows) (default 1000) -h, --help help for webdav - --htpasswd string htpasswd file - if not provided no authentication is done - --key string SSL PEM Private key + --htpasswd string A htpasswd file - if not provided no authentication is done + --key string TLS PEM Private key --max-header-bytes int Maximum size of request header (default 4096) --min-tls-version string Minimum TLS version that is acceptable (default \[dq]tls1.0\[dq]) --no-checksum Don\[aq]t compare checksums on up/download --no-modtime Don\[aq]t read/write the modification time (can speed things up) --no-seek Don\[aq]t allow seeking in files --pass string Password for authentication - --poll-interval duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) + --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --read-only Only allow read-only access - --realm string Realm for authentication (default \[dq]rclone\[dq]) - --server-read-timeout duration Timeout for server reading data (default 1h0m0s) - --server-write-timeout duration Timeout for server writing data (default 1h0m0s) + --realm string Realm for authentication + --salt string Password hashing salt (default \[dq]dlPL2MqE\[dq]) + --server-read-timeout Duration Timeout for server reading data (default 1h0m0s) + --server-write-timeout Duration Timeout for server writing data (default 1h0m0s) --template string User-specified template --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) --user string User name for authentication - --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s) + --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s) 
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off) - --vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s) + --vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s) --vfs-case-insensitive If a file name not found, find a case insensitive match --vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off) --vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached (\[aq]off\[aq] is unlimited) (default off) - --vfs-read-wait duration Time to wait for in-sequence read before seeking (default 20ms) + --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-used-is-size rclone size Use the rclone size algorithm for Used size - --vfs-write-back duration Time to writeback files after last use when using cache (default 5s) - --vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s) + --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s) + --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) \f[R] .fi .PP @@ -10143,7 +10260,7 @@ rclone test changenotify remote: [flags] .nf \f[C] -h, --help help for changenotify - --poll-interval duration Time to wait between polling for changes (default 10s) + --poll-interval Duration Time to wait between polling for changes (default 10s) \f[R] .fi .PP @@ -10212,7 +10329,7 @@ rclone test info [remote:path]+ [flags] --check-normalization 
Check UTF-8 Normalization --check-streaming Check uploads with indeterminate file size -h, --help help for info - --upload-wait duration Wait after writing a file + --upload-wait Duration Wait after writing a file (default 0s) --write-json string Write results to file \f[R] .fi @@ -10270,6 +10387,7 @@ rclone test makefiles [flags] --files int Number of files to create (default 1000) --files-per-directory int Average number of files per directory (default 10) -h, --help help for makefiles + --max-depth int Maximum depth of directory hierarchy (default 10) --max-file-size SizeSuffix Maximum size of files to create (default 100) --max-name-length int Maximum size of file names (default 12) --min-file-size SizeSuffix Minimum size of file to create @@ -10413,7 +10531,6 @@ rclone tree remote:path [flags] .nf \f[C] -a, --all All files are listed (list . files too) - -C, --color Turn colorization on always -d, --dirs-only List directories only --dirsfirst List directories before files (-U disables) --full-path Print the full path prefix for each file @@ -10735,10 +10852,20 @@ DEBUG : :s3: detected overridden config - adding \[dq]{YTu53}\[dq] suffix to nam .SS Valid remote names .PP Remote names are case sensitive, and must adhere to the following rules: -- May only contain \f[C]0\f[R]-\f[C]9\f[R], \f[C]A\f[R]-\f[C]Z\f[R], -\f[C]a\f[R]-\f[C]z\f[R], \f[C]_\f[R], \f[C]-\f[R], \f[C].\f[R] and +- May contain number, letter, \f[C]_\f[R], \f[C]-\f[R], \f[C].\f[R] and space. - May not start with \f[C]-\f[R] or space. +- May not end with space. +.PP +Starting with rclone version 1.61, any Unicode numbers and letters are +allowed, while in older versions it was limited to plain ASCII (0-9, +A-Z, a-z). +If you use the same rclone configuration from different shells, which +may be configured with different character encoding, you must be +cautious to use characters that are possible to write in all of them. 
+This is mostly a problem on Windows, where the console traditionally +uses a non-Unicode character set - defined by the so-called \[dq]code +page\[dq]. .SS Quoting and the shell .PP When you are typing commands to your computer you are using something @@ -11408,6 +11535,18 @@ much quicker than without the \f[C]--checksum\f[R] flag. .PP When using this flag, rclone won\[aq]t update mtimes of remote files if they are incorrect as it would normally. +.SS --color WHEN +.PP +Specifiy when colors (and other ANSI codes) should be added to the +output. +.PP +\f[C]AUTO\f[R] (default) only allows ANSI codes when the output is a +terminal +.PP +\f[C]NEVER\f[R] never allow ANSI codes +.PP +\f[C]ALWAYS\f[R] always add ANSI codes, regardless of the output format +(terminal or file) .SS --compare-dest=DIR .PP When using \f[C]sync\f[R], \f[C]copy\f[R] or \f[C]move\f[R] DIR is @@ -13101,6 +13240,18 @@ For the filtering options \f[C]--max-age\f[R] .IP \[bu] 2 \f[C]--dump filters\f[R] +.IP \[bu] 2 +\f[C]--metadata-include\f[R] +.IP \[bu] 2 +\f[C]--metadata-include-from\f[R] +.IP \[bu] 2 +\f[C]--metadata-exclude\f[R] +.IP \[bu] 2 +\f[C]--metadata-exclude-from\f[R] +.IP \[bu] 2 +\f[C]--metadata-filter\f[R] +.IP \[bu] 2 +\f[C]--metadata-filter-from\f[R] .PP See the filtering section (https://rclone.org/filtering/). .SS Remote control @@ -13369,15 +13520,17 @@ There are two ways of doing it, described below. .SS Configuring using rclone authorize .PP On the headless box run \f[C]rclone\f[R] config but answer \f[C]N\f[R] -to the \f[C]Use auto config?\f[R] question. +to the \f[C]Use web browser to automatically authenticate?\f[R] +question. .IP .nf \f[C] \&... Remote config -Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine +Use web browser to automatically authenticate rclone with remote? 
+ * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access +If not sure try Y. If Y failed, try N. y) Yes (default) n) No y/n> n @@ -13469,15 +13622,17 @@ ssh -L localhost:53682:localhost:53682 username\[at]remote_server .fi .PP Then on the headless box run \f[C]rclone\f[R] config and answer -\f[C]Y\f[R] to the \f[C]Use auto config?\f[R] question. +\f[C]Y\f[R] to the +\f[C]Use web browser to automatically authenticate?\f[R] question. .IP .nf \f[C] \&... Remote config -Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine +Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access +If not sure try Y. If Y failed, try N. y) Yes (default) n) No y/n> y @@ -14618,6 +14773,58 @@ dir1/dir2/dir3/.ignore .PP The command \f[C]rclone ls --exclude-if-present .ignore dir1\f[R] does not list \f[C]dir3\f[R], \f[C]file3\f[R] or \f[C].ignore\f[R]. +.SS Metadata filters +.PP +The metadata filters work in a very similar way to the normal file name +filters, except they match metadata (https://rclone.org/docs/#metadata) +on the object. +.PP +The metadata should be specified as \f[C]key=value\f[R] patterns. +This may be wildcarded using the normal filter patterns or regular +expressions. +.PP +For example if you wished to list only local files with a mode of +\f[C]100664\f[R] you could do that with: +.IP +.nf +\f[C] +rclone lsf -M --files-only --metadata-include \[dq]mode=100664\[dq] . +\f[R] +.fi +.PP +Or if you wished to show files with an \f[C]atime\f[R], \f[C]mtime\f[R] +or \f[C]btime\f[R] at a given date: +.IP +.nf +\f[C] +rclone lsf -M --files-only --metadata-include \[dq][abm]time=2022-12-16*\[dq] . 
+\f[R] +.fi +.PP +Like file filtering, metadata filtering only applies to files not to +directories. +.PP +The filters can be applied using these flags. +.IP \[bu] 2 +\f[C]--metadata-include\f[R] - Include metadatas matching pattern +.IP \[bu] 2 +\f[C]--metadata-include-from\f[R] - Read metadata include patterns from +file (use - to read from stdin) +.IP \[bu] 2 +\f[C]--metadata-exclude\f[R] - Exclude metadatas matching pattern +.IP \[bu] 2 +\f[C]--metadata-exclude-from\f[R] - Read metadata exclude patterns from +file (use - to read from stdin) +.IP \[bu] 2 +\f[C]--metadata-filter\f[R] - Add a metadata filtering rule +.IP \[bu] 2 +\f[C]--metadata-filter-from\f[R] - Read metadata filtering patterns from +a file (use - to read from stdin) +.PP +Each flag can be repeated. +See the section on how filter rules are applied for more details - these +flags work in an identical way to the file name filtering flags, but +instead of file name patterns have metadata patterns. .SS Common pitfalls .PP The most frequent filter support issues on the rclone @@ -15460,6 +15667,13 @@ providers (https://rclone.org/commands/rclone_config_providers/) command for more information on the above. .PP \f[B]Authentication is required for this call.\f[R] +.SS config/setpath: Set the path of the config file +.PP +Parameters: +.IP \[bu] 2 +path - path to the config file to use +.PP +\f[B]Authentication is required for this call.\f[R] .SS config/update: update the config for a remote. .PP This takes the following parameters: @@ -15599,7 +15813,7 @@ Returns: \[dq]result\[dq]: \[dq]\[dq] } -OR +OR { \[dq]error\[dq]: true, \[dq]result\[dq]: \[dq]\[dq] @@ -15818,6 +16032,25 @@ go tool pprof http://localhost:5572/debug/pprof/block Parameters: .IP \[bu] 2 rate - int +.SS debug/set-gc-percent: Call runtime/debug.SetGCPercent for setting the garbage collection target percentage. 
+.PP +SetGCPercent sets the garbage collection target percentage: a collection +is triggered when the ratio of freshly allocated data to live data +remaining after the previous collection reaches this percentage. +SetGCPercent returns the previous setting. +The initial setting is the value of the GOGC environment variable at +startup, or 100 if the variable is not set. +.PP +This setting may be effectively reduced in order to maintain a memory +limit. +A negative percentage effectively disables garbage collection, unless +the memory limit is reached. +.PP +See https://pkg.go.dev/runtime/debug#SetMemoryLimit for more details. +.PP +Parameters: +.IP \[bu] 2 +gc-percent - int .SS debug/set-mutex-profile-fraction: Set runtime.SetMutexProfileFraction for mutex profiling. .PP SetMutexProfileFraction controls the fraction of mutex contention events @@ -15844,6 +16077,48 @@ rate - int Results: .IP \[bu] 2 previousRate - int +.SS debug/set-soft-memory-limit: Call runtime/debug.SetMemoryLimit for setting a soft memory limit for the runtime. +.PP +SetMemoryLimit provides the runtime with a soft memory limit. +.PP +The runtime undertakes several processes to try to respect this memory +limit, including adjustments to the frequency of garbage collections and +returning memory to the underlying system more aggressively. +This limit will be respected even if GOGC=off (or, if SetGCPercent(-1) +is executed). +.PP +The input limit is provided as bytes, and includes all memory mapped, +managed, and not released by the Go runtime. +Notably, it does not account for space used by the Go binary and memory +external to Go, such as memory managed by the underlying system on +behalf of the process, or memory managed by non-Go code inside the same +process. +Examples of excluded memory sources include: OS kernel memory held on +behalf of the process, memory allocated by C code, and memory mapped by +syscall.Mmap (because it is not managed by the Go runtime). 
+.PP +A zero limit or a limit that\[aq]s lower than the amount of memory used +by the Go runtime may cause the garbage collector to run nearly +continuously. +However, the application may still make progress. +.PP +The memory limit is always respected by the Go runtime, so to +effectively disable this behavior, set the limit very high. +math.MaxInt64 is the canonical value for disabling the limit, but values +much greater than the available memory on the underlying system work +just as well. +.PP +See https://go.dev/doc/gc-guide for a detailed guide explaining the soft +memory limit in more detail, as well as a variety of common use-cases +and scenarios. +.PP +SetMemoryLimit returns the previously set memory limit. +A negative input does not adjust the limit, and allows for retrieval of +the currently set memory limit. +.PP +Parameters: +.IP \[bu] 2 +mem-limit - int .SS fscache/clear: Clear the Fs cache. .PP This clears the fs cache. @@ -19313,7 +19588,7 @@ T} T{ Oracle Object Storage T}@T{ -Yes +No T}@T{ Yes T}@T{ @@ -19325,7 +19600,7 @@ Yes T}@T{ Yes T}@T{ -No +Yes T}@T{ No T}@T{ @@ -19545,7 +19820,7 @@ Storj T}@T{ Yes \[dg] T}@T{ -No +Yes T}@T{ Yes T}@T{ @@ -19789,9 +20064,10 @@ These flags are available for every command. 
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size --client-cert string Client SSL certificate (PEM) for mutual TLS auth --client-key string Client SSL private key (PEM) for mutual TLS auth + --color string When to show colors (and other ANSI codes) AUTO|NEVER|ALWAYS (default \[dq]AUTO\[dq]) --compare-dest stringArray Include additional comma separated server-side paths during comparison --config string Config file (default \[dq]$HOME/.config/rclone/rclone.conf\[dq]) - --contimeout duration Connect timeout (default 1m0s) + --contimeout Duration Connect timeout (default 1m0s) --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination --cpuprofile string Write cpu profile to file --cutoff-mode string Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default \[dq]HARD\[dq]) @@ -19809,16 +20085,16 @@ These flags are available for every command. --dump-headers Dump HTTP headers - may contain sensitive info --error-on-no-transfer Sets exit code 9 if no files are transferred, useful in scripts --exclude stringArray Exclude files matching pattern - --exclude-from stringArray Read exclude patterns from file (use - to read from stdin) + --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin) --exclude-if-present stringArray Exclude directories if filename is present - --expect-continue-timeout duration Timeout when using expect / 100-continue in HTTP (default 1s) + --expect-continue-timeout Duration Timeout when using expect / 100-continue in HTTP (default 1s) --fast-list Use recursive list if available; uses more memory but fewer transactions --files-from stringArray Read list of source-file names from file (use - to read from stdin) --files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin) - -f, --filter stringArray Add a file-filtering rule - --filter-from stringArray 
Read filtering patterns from a file (use - to read from stdin) - --fs-cache-expire-duration duration Cache remotes for this long (0 to disable caching) (default 5m0s) - --fs-cache-expire-interval duration Interval to check for expired remotes (default 1m0s) + -f, --filter stringArray Add a file filtering rule + --filter-from stringArray Read file filtering patterns from a file (use - to read from stdin) + --fs-cache-expire-duration Duration Cache remotes for this long (0 to disable caching) (default 5m0s) + --fs-cache-expire-interval Duration Interval to check for expired remotes (default 1m0s) --header stringArray Set HTTP header for all transactions --header-download stringArray Set HTTP header for download transactions --header-upload stringArray Set HTTP header for upload transactions @@ -19832,9 +20108,9 @@ These flags are available for every command. -I, --ignore-times Don\[aq]t skip files that match size and time - transfer all files --immutable Do not modify files, fail if existing files have been modified --include stringArray Include files matching pattern - --include-from stringArray Read include patterns from file (use - to read from stdin) + --include-from stringArray Read file include patterns from file (use - to read from stdin) -i, --interactive Enable interactive mode - --kv-lock-time duration Maximum time to keep key-value database locked by process (default 1s) + --kv-lock-time Duration Maximum time to keep key-value database locked by process (default 1s) --log-file string Log everything to this file --log-format string Comma separated list of log format options (default \[dq]date,time\[dq]) --log-level string Log level DEBUG|INFO|NOTICE|ERROR (default \[dq]NOTICE\[dq]) @@ -19844,16 +20120,22 @@ These flags are available for every command. 
--max-backlog int Maximum number of objects in sync or check backlog (default 10000) --max-delete int When synchronizing, limit the number of deletes (default -1) --max-depth int If set limits the recursion depth to this (default -1) - --max-duration duration Maximum duration rclone will transfer data for + --max-duration Duration Maximum duration rclone will transfer data for (default 0s) --max-size SizeSuffix Only transfer files smaller than this in KiB or suffix B|K|M|G|T|P (default off) --max-stats-groups int Maximum number of stats groups to keep in memory, on max oldest is discarded (default 1000) --max-transfer SizeSuffix Maximum size of data to transfer (default off) --memprofile string Write memory profile to file -M, --metadata If set, preserve metadata when copying objects + --metadata-exclude stringArray Exclude metadatas matching pattern + --metadata-exclude-from stringArray Read metadata exclude patterns from file (use - to read from stdin) + --metadata-filter stringArray Add a metadata filtering rule + --metadata-filter-from stringArray Read metadata filtering patterns from a file (use - to read from stdin) + --metadata-include stringArray Include metadatas matching pattern + --metadata-include-from stringArray Read metadata include patterns from file (use - to read from stdin) --metadata-set stringArray Add metadata key=value when uploading --min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off) --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) - --modify-window duration Max time diff to be considered the same (default 1ns) + --modify-window Duration Max time diff to be considered the same (default 1ns) --multi-thread-cutoff SizeSuffix Use multi-thread downloads for files above this size (default 250Mi) --multi-thread-streams int Max number of streams to use for multi-thread downloads (default 4) --no-check-certificate Do not verify the server SSL 
certificate (insecure) @@ -19869,25 +20151,26 @@ These flags are available for every command. --progress-terminal-title Show progress on the terminal title (requires -P/--progress) -q, --quiet Print as little stuff as possible --rc Enable the remote control server - --rc-addr string IPaddress:Port or :Port to bind server to (default \[dq]localhost:5572\[dq]) + --rc-addr stringArray IPaddress:Port or :Port to bind server to (default [localhost:5572]) --rc-allow-origin string Set the allowed origin for CORS --rc-baseurl string Prefix for URLs - leave blank for root - --rc-cert string SSL PEM key (concatenation of certificate and CA certificate) + --rc-cert string TLS PEM key (concatenation of certificate and CA certificate) --rc-client-ca string Client certificate authority to verify clients with --rc-enable-metrics Enable prometheus metrics on /metrics --rc-files string Path to local files to serve on the HTTP server - --rc-htpasswd string htpasswd file - if not provided no authentication is done - --rc-job-expire-duration duration Expire finished async jobs older than this value (default 1m0s) - --rc-job-expire-interval duration Interval to check for expired async jobs (default 10s) - --rc-key string SSL PEM Private key + --rc-htpasswd string A htpasswd file - if not provided no authentication is done + --rc-job-expire-duration Duration Expire finished async jobs older than this value (default 1m0s) + --rc-job-expire-interval Duration Interval to check for expired async jobs (default 10s) + --rc-key string TLS PEM Private key --rc-max-header-bytes int Maximum size of request header (default 4096) --rc-min-tls-version string Minimum TLS version that is acceptable (default \[dq]tls1.0\[dq]) --rc-no-auth Don\[aq]t require auth for certain methods --rc-pass string Password for authentication - --rc-realm string Realm for authentication (default \[dq]rclone\[dq]) + --rc-realm string Realm for authentication + --rc-salt string Password hashing salt (default 
\[dq]dlPL2MqE\[dq]) --rc-serve Enable the serving of remote objects - --rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s) - --rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s) + --rc-server-read-timeout Duration Timeout for server reading data (default 1h0m0s) + --rc-server-write-timeout Duration Timeout for server writing data (default 1h0m0s) --rc-template string User-specified template --rc-user string User name for authentication --rc-web-fetch-url string URL to fetch the releases for webgui (default \[dq]https://api.github.com/repos/rclone/rclone-webui-react/releases/latest\[dq]) @@ -19897,10 +20180,10 @@ These flags are available for every command. --rc-web-gui-update Check and update to latest version of web gui --refresh-times Refresh the modtime of remote files --retries int Retry operations this many times if they fail (default 3) - --retries-sleep duration Interval between retrying operations if they fail, e.g. 500ms, 60s, 5m (0 to disable) + --retries-sleep Duration Interval between retrying operations if they fail, e.g. 500ms, 60s, 5m (0 to disable) (default 0s) --server-side-across-configs Allow server-side operations (e.g. copy) to work across different configs --size-only Skip based on size only, not mod-time or checksum - --stats duration Interval between printing stats, e.g. 500ms, 60s, 5m (0 to disable) (default 1m0s) + --stats Duration Interval between printing stats, e.g. 500ms, 60s, 5m (0 to disable) (default 1m0s) --stats-file-name-length int Max file name length in stats (0 for no limit) (default 45) --stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default \[dq]INFO\[dq]) --stats-one-line Make the stats fit on one line @@ -19913,7 +20196,7 @@ These flags are available for every command. --syslog Use Syslog for logging --syslog-facility string Facility for syslog, e.g. KERN,USER,... 
(default \[dq]DAEMON\[dq]) --temp-dir string Directory rclone will use for temporary files (default \[dq]/tmp\[dq]) - --timeout duration IO idle timeout (default 5m0s) + --timeout Duration IO idle timeout (default 5m0s) --tpslimit float Limit HTTP transactions per second to this --tpslimit-burst int Max burst of transactions for --tpslimit (default 1) --track-renames When synchronizing, track file renames and do a server-side move if possible @@ -19924,7 +20207,7 @@ These flags are available for every command. --use-json-log Use json log format --use-mmap Use mmap allocator (see docs) --use-server-modtime Use server modified time instead of object metadata - --user-agent string Set the user-agent to a specified string (default \[dq]rclone/v1.60.0\[dq]) + --user-agent string Set the user-agent to a specified string (default \[dq]rclone/v1.61.0\[dq]) -v, --verbose count Print lots more stuff (repeat for more) \f[R] .fi @@ -19935,529 +20218,543 @@ They control the backends and may be set in the config file. 
.IP .nf \f[C] - --acd-auth-url string Auth server URL - --acd-client-id string OAuth Client Id - --acd-client-secret string OAuth Client Secret - --acd-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot) - --acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink (default 9Gi) - --acd-token string OAuth Access Token as a JSON blob - --acd-token-url string Token server url - --acd-upload-wait-per-gb Duration Additional time per GiB to wait after a failed complete upload to see if it appears (default 3m0s) - --alias-remote string Remote or path to alias - --azureblob-access-tier string Access tier of blob: hot, cool or archive - --azureblob-account string Storage Account Name - --azureblob-archive-tier-delete Delete archive tier blobs before overwriting - --azureblob-chunk-size SizeSuffix Upload chunk size (default 4Mi) - --azureblob-disable-checksum Don\[aq]t store MD5 checksum with object metadata - --azureblob-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8) - --azureblob-endpoint string Endpoint for the service - --azureblob-key string Storage Account Key - --azureblob-list-chunk int Size of blob list (default 5000) - --azureblob-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s) - --azureblob-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool - --azureblob-msi-client-id string Object ID of the user-assigned MSI to use, if any - --azureblob-msi-mi-res-id string Azure resource ID of the user-assigned MSI to use, if any - --azureblob-msi-object-id string Object ID of the user-assigned MSI to use, if any - --azureblob-no-head-object If set, do not do HEAD before GET when getting objects - --azureblob-public-access string Public access level of a container: blob or container - --azureblob-sas-url string SAS URL for container level access only - 
--azureblob-service-principal-file string Path to file containing credentials for use with a service principal - --azureblob-upload-concurrency int Concurrency for multipart uploads (default 16) - --azureblob-upload-cutoff string Cutoff for switching to chunked upload (<= 256 MiB) (deprecated) - --azureblob-use-emulator Uses local storage emulator if provided as \[aq]true\[aq] - --azureblob-use-msi Use a managed service identity to authenticate (only works in Azure) - --b2-account string Account ID or Application Key ID - --b2-chunk-size SizeSuffix Upload chunk size (default 96Mi) - --b2-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4Gi) - --b2-disable-checksum Disable checksums for large (> upload cutoff) files - --b2-download-auth-duration Duration Time before the authorization token will expire in s or suffix ms|s|m|h|d (default 1w) - --b2-download-url string Custom endpoint for downloads - --b2-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot) - --b2-endpoint string Endpoint for the service - --b2-hard-delete Permanently delete files on remote removal, otherwise hide files - --b2-key string Application Key - --b2-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s) - --b2-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool - --b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging - --b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi) - --b2-version-at Time Show file versions as they were at the specified time (default off) - --b2-versions Include old versions in directory listings - --box-access-token string Box App Primary Access Token - --box-auth-url string Auth server URL - --box-box-config-file string Box App config.json location - --box-box-sub-type string (default \[dq]user\[dq]) - --box-client-id string OAuth Client Id - --box-client-secret string OAuth 
Client Secret - --box-commit-retries int Max number of times to try committing a multipart file (default 100) - --box-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot) - --box-list-chunk int Size of listing chunk 1-1000 (default 1000) - --box-owned-by string Only show items owned by the login (email address) passed in - --box-root-folder-id string Fill in for rclone to use a non root folder as its starting point - --box-token string OAuth Access Token as a JSON blob - --box-token-url string Token server url - --box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50 MiB) (default 50Mi) - --cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage (default 1m0s) - --cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming - --cache-chunk-path string Directory to cache chunk files (default \[dq]$HOME/.cache/rclone/cache-backend\[dq]) - --cache-chunk-size SizeSuffix The size of a chunk (partial file data) (default 5Mi) - --cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk (default 10Gi) - --cache-db-path string Directory to store file structure metadata DB (default \[dq]$HOME/.cache/rclone/cache-backend\[dq]) - --cache-db-purge Clear all the cached data for this remote on start - --cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s) - --cache-info-age Duration How long to cache file structure information (directory listings, file size, times, etc.) 
(default 6h0m0s) - --cache-plex-insecure string Skip all certificate verification when connecting to the Plex server - --cache-plex-password string The password of the Plex user (obscured) - --cache-plex-url string The URL of the Plex server - --cache-plex-username string The username of the Plex user - --cache-read-retries int How many times to retry a read from a cache storage (default 10) - --cache-remote string Remote to cache - --cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1) - --cache-tmp-upload-path string Directory to keep temporary files until they are uploaded - --cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s) - --cache-workers int How many workers should run in parallel to download chunks (default 4) - --cache-writes Cache file data on writes through the FS - --chunker-chunk-size SizeSuffix Files larger than chunk size will be split in chunks (default 2Gi) - --chunker-fail-hard Choose how chunker should handle files with missing or invalid chunks - --chunker-hash-type string Choose how chunker handles hash sums (default \[dq]md5\[dq]) - --chunker-remote string Remote to chunk/unchunk - --combine-upstreams SpaceSepList Upstreams for combining - --compress-level int GZIP compression level (-2 to 9) (default -1) - --compress-mode string Compression mode (default \[dq]gzip\[dq]) - --compress-ram-cache-limit SizeSuffix Some remotes don\[aq]t allow the upload of files with unknown size (default 20Mi) - --compress-remote string Remote to compress - -L, --copy-links Follow symlinks and copy the pointed to item - --crypt-directory-name-encryption Option to either encrypt directory names or leave them intact (default true) - --crypt-filename-encoding string How to encode the encrypted filename to text string (default \[dq]base32\[dq]) - --crypt-filename-encryption string How to encrypt the filenames (default \[dq]standard\[dq]) - 
--crypt-no-data-encryption Option to either encrypt file data or leave it unencrypted - --crypt-password string Password or pass phrase for encryption (obscured) - --crypt-password2 string Password or pass phrase for salt (obscured) - --crypt-remote string Remote to encrypt/decrypt - --crypt-server-side-across-configs Allow server-side operations (e.g. copy) to work across different crypt configs - --crypt-show-mapping For all files listed show how the names encrypt - --drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded - --drive-allow-import-name-change Allow the filetype to change when uploading Google docs - --drive-auth-owner-only Only consider files owned by the authenticated user - --drive-auth-url string Auth server URL - --drive-chunk-size SizeSuffix Upload chunk size (default 8Mi) - --drive-client-id string Google Application Client Id - --drive-client-secret string OAuth Client Secret - --drive-copy-shortcut-content Server side copy contents of shortcuts instead of the shortcut - --drive-disable-http2 Disable drive using http2 (default true) - --drive-encoding MultiEncoder The encoding for the backend (default InvalidUtf8) - --drive-export-formats string Comma separated list of preferred formats for downloading Google docs (default \[dq]docx,xlsx,pptx,svg\[dq]) - --drive-formats string Deprecated: See export_formats - --drive-impersonate string Impersonate this user when using a service account - --drive-import-formats string Comma separated list of preferred formats for uploading Google docs - --drive-keep-revision-forever Keep new head revision of each file forever - --drive-list-chunk int Size of listing chunk 100-1000, 0 to disable (default 1000) - --drive-pacer-burst int Number of API calls to allow without sleeping (default 100) - --drive-pacer-min-sleep Duration Minimum time to sleep between API calls (default 100ms) - --drive-resource-key string Resource key for accessing a link-shared file - 
--drive-root-folder-id string ID of the root folder - --drive-scope string Scope that rclone should use when requesting access from drive - --drive-server-side-across-configs Allow server-side operations (e.g. copy) to work across different drive configs - --drive-service-account-credentials string Service Account Credentials JSON blob - --drive-service-account-file string Service Account Credentials JSON file path - --drive-shared-with-me Only show files that are shared with me - --drive-size-as-quota Show sizes as storage quota usage, not actual size - --drive-skip-checksum-gphotos Skip MD5 checksum on Google photos and videos only - --drive-skip-dangling-shortcuts If set skip dangling shortcut files - --drive-skip-gdocs Skip google documents in all listings - --drive-skip-shortcuts If set skip shortcut files - --drive-starred-only Only show files that are starred - --drive-stop-on-download-limit Make download limit errors be fatal - --drive-stop-on-upload-limit Make upload limit errors be fatal - --drive-team-drive string ID of the Shared Drive (Team Drive) - --drive-token string OAuth Access Token as a JSON blob - --drive-token-url string Token server url - --drive-trashed-only Only show files that are in the trash - --drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8Mi) - --drive-use-created-date Use file created date instead of modified date - --drive-use-shared-date Use date file was shared instead of modified date - --drive-use-trash Send files to the trash instead of deleting permanently (default true) - --drive-v2-download-min-size SizeSuffix If Object\[aq]s are greater, use drive v2 API to download (default off) - --dropbox-auth-url string Auth server URL - --dropbox-batch-commit-timeout Duration Max time to wait for a batch to finish committing (default 10m0s) - --dropbox-batch-mode string Upload file batching sync|async|off (default \[dq]sync\[dq]) - --dropbox-batch-size int Max number of files in upload batch - 
--dropbox-batch-timeout Duration Max time to allow an idle upload batch before uploading (default 0s) - --dropbox-chunk-size SizeSuffix Upload chunk size (< 150Mi) (default 48Mi) - --dropbox-client-id string OAuth Client Id - --dropbox-client-secret string OAuth Client Secret - --dropbox-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot) - --dropbox-impersonate string Impersonate this user when using a business account - --dropbox-shared-files Instructs rclone to work on individual shared files - --dropbox-shared-folders Instructs rclone to work on shared folders - --dropbox-token string OAuth Access Token as a JSON blob - --dropbox-token-url string Token server url - --fichier-api-key string Your API Key, get it from https://1fichier.com/console/params.pl - --fichier-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot) - --fichier-file-password string If you want to download a shared file that is password protected, add this parameter (obscured) - --fichier-folder-password string If you want to list the files in a shared folder that is password protected, add this parameter (obscured) - --fichier-shared-folder string If you want to download a shared folder, add this parameter - --filefabric-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot) - --filefabric-permanent-token string Permanent Authentication Token - --filefabric-root-folder-id string ID of the root folder - --filefabric-token string Session Token - --filefabric-token-expiry string Token expiry time - --filefabric-url string URL of the Enterprise File Fabric to connect to - --filefabric-version string Version read from the file fabric - --ftp-ask-password Allow asking for FTP password when needed - --ftp-close-timeout Duration Maximum time to wait for a response to close (default 1m0s) - 
--ftp-concurrency int Maximum number of FTP simultaneous connections, 0 for unlimited - --ftp-disable-epsv Disable using EPSV even if server advertises support - --ftp-disable-mlsd Disable using MLSD even if server advertises support - --ftp-disable-tls13 Disable TLS 1.3 (workaround for FTP servers with buggy TLS) - --ftp-disable-utf8 Disable using UTF-8 even if server advertises support - --ftp-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,RightSpace,Dot) - --ftp-explicit-tls Use Explicit FTPS (FTP over TLS) - --ftp-force-list-hidden Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD - --ftp-host string FTP host to connect to - --ftp-idle-timeout Duration Max time before closing idle connections (default 1m0s) - --ftp-no-check-certificate Do not verify the TLS certificate of the server - --ftp-pass string FTP password (obscured) - --ftp-port int FTP port number (default 21) - --ftp-shut-timeout Duration Maximum time to wait for data connection closing status (default 1m0s) - --ftp-tls Use Implicit FTPS (FTP over TLS) - --ftp-tls-cache-size int Size of TLS session cache for all control and data connections (default 32) - --ftp-user string FTP username (default \[dq]$USER\[dq]) - --ftp-writing-mdtm Use MDTM to set modification time (VsFtpd quirk) - --gcs-anonymous Access public buckets and objects without credentials - --gcs-auth-url string Auth server URL - --gcs-bucket-acl string Access Control List for new buckets - --gcs-bucket-policy-only Access checks should use bucket-level IAM policies - --gcs-client-id string OAuth Client Id - --gcs-client-secret string OAuth Client Secret - --gcs-decompress If set this will decompress gzip encoded objects - --gcs-encoding MultiEncoder The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot) - --gcs-endpoint string Endpoint for the service - --gcs-location string Location for the newly created buckets - --gcs-no-check-bucket If set, don\[aq]t 
attempt to check the bucket exists or create it - --gcs-object-acl string Access Control List for new objects - --gcs-project-number string Project number - --gcs-service-account-file string Service Account Credentials JSON file path - --gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage - --gcs-token string OAuth Access Token as a JSON blob - --gcs-token-url string Token server url - --gphotos-auth-url string Auth server URL - --gphotos-client-id string OAuth Client Id - --gphotos-client-secret string OAuth Client Secret - --gphotos-encoding MultiEncoder The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot) - --gphotos-include-archived Also view and download archived media - --gphotos-read-only Set to make the Google Photos backend read only - --gphotos-read-size Set to read the size of media items - --gphotos-start-year int Year limits the photos to be downloaded to those which are uploaded after the given year (default 2000) - --gphotos-token string OAuth Access Token as a JSON blob - --gphotos-token-url string Token server url - --hasher-auto-size SizeSuffix Auto-update checksum for files smaller than this size (disabled by default) - --hasher-hashes CommaSepList Comma separated list of supported checksum types (default md5,sha1) - --hasher-max-age Duration Maximum time to keep checksums in cache (0 = no cache, off = cache forever) (default off) - --hasher-remote string Remote to cache checksums for (e.g. 
myRemote:path) - --hdfs-data-transfer-protection string Kerberos data transfer protection: authentication|integrity|privacy - --hdfs-encoding MultiEncoder The encoding for the backend (default Slash,Colon,Del,Ctl,InvalidUtf8,Dot) - --hdfs-namenode string Hadoop name node and port - --hdfs-service-principal-name string Kerberos service principal name for the namenode - --hdfs-username string Hadoop user name - --hidrive-auth-url string Auth server URL - --hidrive-chunk-size SizeSuffix Chunksize for chunked uploads (default 48Mi) - --hidrive-client-id string OAuth Client Id - --hidrive-client-secret string OAuth Client Secret - --hidrive-disable-fetching-member-count Do not fetch number of objects in directories unless it is absolutely necessary - --hidrive-encoding MultiEncoder The encoding for the backend (default Slash,Dot) - --hidrive-endpoint string Endpoint for the service (default \[dq]https://api.hidrive.strato.com/2.1\[dq]) - --hidrive-root-prefix string The root/parent folder for all paths (default \[dq]/\[dq]) - --hidrive-scope-access string Access permissions that rclone should use when requesting access from HiDrive (default \[dq]rw\[dq]) - --hidrive-scope-role string User-level that rclone should use when requesting access from HiDrive (default \[dq]user\[dq]) - --hidrive-token string OAuth Access Token as a JSON blob - --hidrive-token-url string Token server url - --hidrive-upload-concurrency int Concurrency for chunked uploads (default 4) - --hidrive-upload-cutoff SizeSuffix Cutoff/Threshold for chunked uploads (default 96Mi) - --http-headers CommaSepList Set HTTP headers for all transactions - --http-no-head Don\[aq]t use HEAD requests - --http-no-slash Set this if the site doesn\[aq]t end directories with / - --http-url string URL of HTTP host to connect to - --internetarchive-access-key-id string IAS3 Access Key - --internetarchive-disable-checksum Don\[aq]t ask the server to test against MD5 checksum calculated by rclone (default true) - 
--internetarchive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,CrLf,Del,Ctl,InvalidUtf8,Dot) - --internetarchive-endpoint string IAS3 Endpoint (default \[dq]https://s3.us.archive.org\[dq]) - --internetarchive-front-endpoint string Host of InternetArchive Frontend (default \[dq]https://archive.org\[dq]) - --internetarchive-secret-access-key string IAS3 Secret Key (password) - --internetarchive-wait-archive Duration Timeout for waiting the server\[aq]s processing tasks (specifically archive and book_op) to finish (default 0s) - --jottacloud-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot) - --jottacloud-hard-delete Delete files permanently rather than putting them into the trash - --jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required (default 10Mi) - --jottacloud-no-versions Avoid server side versioning by deleting files and recreating files instead of overwriting them - --jottacloud-trashed-only Only show files that are in the trash - --jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fail\[aq]s (default 10Mi) - --koofr-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot) - --koofr-endpoint string The Koofr API endpoint to use - --koofr-mountid string Mount ID of the mount to use - --koofr-password string Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password) (obscured) - --koofr-provider string Choose your storage provider - --koofr-setmtime Does the backend support setting modification time (default true) - --koofr-user string Your user name - -l, --links Translate symlinks to/from regular files with a \[aq].rclonelink\[aq] extension - --local-case-insensitive Force the filesystem to report itself as case insensitive - --local-case-sensitive Force the 
filesystem to report itself as case sensitive - --local-encoding MultiEncoder The encoding for the backend (default Slash,Dot) - --local-no-check-updated Don\[aq]t check to see if the files change during upload - --local-no-preallocate Disable preallocation of disk space for transferred files - --local-no-set-modtime Disable setting modtime - --local-no-sparse Disable sparse files for multi-thread downloads - --local-nounc Disable UNC (long path names) conversion on Windows - --local-unicode-normalization Apply unicode NFC normalization to paths and filenames - --local-zero-size-links Assume the Stat size of links is zero (and read them instead) (deprecated) - --mailru-check-hash What should copy do if file checksum is mismatched or invalid (default true) - --mailru-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot) - --mailru-pass string Password (obscured) - --mailru-speedup-enable Skip full upload if there is another file with same data hash (default true) - --mailru-speedup-file-patterns string Comma separated list of file name patterns eligible for speedup (put by hash) (default \[dq]*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf\[dq]) - --mailru-speedup-max-disk SizeSuffix This option allows you to disable speedup (put by hash) for large files (default 3Gi) - --mailru-speedup-max-memory SizeSuffix Files larger than the size given below will always be hashed on disk (default 32Mi) - --mailru-user string User name (usually email) - --mega-debug Output more debug from Mega - --mega-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot) - --mega-hard-delete Delete files permanently rather than putting them into the trash - --mega-pass string Password (obscured) - --mega-user string User name - --netstorage-account string Set the NetStorage account name - --netstorage-host string Domain+path of NetStorage host to connect to - 
--netstorage-protocol string Select between HTTP or HTTPS protocol (default \[dq]https\[dq]) - --netstorage-secret string Set the NetStorage account secret/G2O key for authentication (obscured) - -x, --one-file-system Don\[aq]t cross filesystem boundaries (unix/macOS only) - --onedrive-access-scopes SpaceSepList Set scopes to be requested by rclone (default Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access) - --onedrive-auth-url string Auth server URL - --onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k (327,680 bytes) (default 10Mi) - --onedrive-client-id string OAuth Client Id - --onedrive-client-secret string OAuth Client Secret - --onedrive-drive-id string The ID of the drive to use - --onedrive-drive-type string The type of the drive (personal | business | documentLibrary) - --onedrive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot) - --onedrive-expose-onenote-files Set to make OneNote files show up in directory listings - --onedrive-link-password string Set the password for links created by the link command - --onedrive-link-scope string Set the scope of the links created by the link command (default \[dq]anonymous\[dq]) - --onedrive-link-type string Set the type of the links created by the link command (default \[dq]view\[dq]) - --onedrive-list-chunk int Size of listing chunk (default 1000) - --onedrive-no-versions Remove all versions on modifying operations - --onedrive-region string Choose national cloud region for OneDrive (default \[dq]global\[dq]) - --onedrive-root-folder-id string ID of the root folder - --onedrive-server-side-across-configs Allow server-side operations (e.g. 
copy) to work across different onedrive configs - --onedrive-token string OAuth Access Token as a JSON blob - --onedrive-token-url string Token server url - --oos-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi) - --oos-compartment string Object storage compartment OCID - --oos-config-file string Path to OCI config file (default \[dq]\[ti]/.oci/config\[dq]) - --oos-config-profile string Profile name inside the oci config file (default \[dq]Default\[dq]) - --oos-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi) - --oos-copy-timeout Duration Timeout for copy (default 1m0s) - --oos-disable-checksum Don\[aq]t store MD5 checksum with object metadata - --oos-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot) - --oos-endpoint string Endpoint for Object storage API - --oos-leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery - --oos-namespace string Object storage namespace - --oos-no-check-bucket If set, don\[aq]t attempt to check the bucket exists or create it - --oos-provider string Choose your Auth Provider (default \[dq]env_auth\[dq]) - --oos-region string Object storage Region - --oos-upload-concurrency int Concurrency for multipart uploads (default 10) - --oos-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi) - --opendrive-chunk-size SizeSuffix Files will be uploaded in chunks this size (default 10Mi) - --opendrive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot) - --opendrive-password string Password (obscured) - --opendrive-username string Username - --pcloud-auth-url string Auth server URL - --pcloud-client-id string OAuth Client Id - --pcloud-client-secret string OAuth Client Secret - --pcloud-encoding MultiEncoder The encoding for the 
backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot) - --pcloud-hostname string Hostname to connect to (default \[dq]api.pcloud.com\[dq]) - --pcloud-password string Your pcloud password (obscured) - --pcloud-root-folder-id string Fill in for rclone to use a non root folder as its starting point (default \[dq]d0\[dq]) - --pcloud-token string OAuth Access Token as a JSON blob - --pcloud-token-url string Token server url - --pcloud-username string Your pcloud username - --premiumizeme-encoding MultiEncoder The encoding for the backend (default Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot) - --putio-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot) - --qingstor-access-key-id string QingStor Access Key ID - --qingstor-chunk-size SizeSuffix Chunk size to use for uploading (default 4Mi) - --qingstor-connection-retries int Number of connection retries (default 3) - --qingstor-encoding MultiEncoder The encoding for the backend (default Slash,Ctl,InvalidUtf8) - --qingstor-endpoint string Enter an endpoint URL to connection QingStor API - --qingstor-env-auth Get QingStor credentials from runtime - --qingstor-secret-access-key string QingStor Secret Access Key (password) - --qingstor-upload-concurrency int Concurrency for multipart uploads (default 1) - --qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi) - --qingstor-zone string Zone to connect to - --s3-access-key-id string AWS Access Key ID - --s3-acl string Canned ACL used when creating buckets and storing or copying objects - --s3-bucket-acl string Canned ACL used when creating buckets - --s3-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi) - --s3-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi) - --s3-decompress If set this will decompress gzip encoded objects - --s3-disable-checksum Don\[aq]t store MD5 checksum with object metadata - --s3-disable-http2 Disable usage of 
http2 for S3 backends - --s3-download-url string Custom endpoint for downloads - --s3-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot) - --s3-endpoint string Endpoint for S3 API - --s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars) - --s3-force-path-style If true use path style access if false use virtual hosted style (default true) - --s3-leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery - --s3-list-chunk int Size of listing chunk (response list for each ListObject S3 request) (default 1000) - --s3-list-url-encode Tristate Whether to url encode listings: true/false/unset (default unset) - --s3-list-version int Version of ListObjects to use: 1,2 or 0 for auto - --s3-location-constraint string Location constraint - must be set to match the Region - --s3-max-upload-parts int Maximum number of parts in a multipart upload (default 10000) - --s3-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s) - --s3-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool - --s3-no-check-bucket If set, don\[aq]t attempt to check the bucket exists or create it - --s3-no-head If set, don\[aq]t HEAD uploaded objects to check integrity - --s3-no-head-object If set, do not do HEAD before GET when getting objects - --s3-no-system-metadata Suppress setting and reading of system metadata - --s3-profile string Profile to use in the shared credentials file - --s3-provider string Choose your S3 provider - --s3-region string Region to connect to - --s3-requester-pays Enables requester pays option when interacting with S3 bucket - --s3-secret-access-key string AWS Secret Access Key (password) - --s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3 - --s3-session-token string An AWS session token - 
--s3-shared-credentials-file string Path to the shared credentials file - --s3-sse-customer-algorithm string If using SSE-C, the server-side encryption algorithm used when storing this object in S3 - --s3-sse-customer-key string To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data - --s3-sse-customer-key-base64 string If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data - --s3-sse-customer-key-md5 string If using SSE-C you may provide the secret encryption key MD5 checksum (optional) - --s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key - --s3-storage-class string The storage class to use when storing new objects in S3 - --s3-upload-concurrency int Concurrency for multipart uploads (default 4) - --s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi) - --s3-use-accelerate-endpoint If true use the AWS S3 accelerated endpoint - --s3-use-multipart-etag Tristate Whether to use ETag in multipart uploads for verification (default unset) - --s3-use-presigned-request Whether to use a presigned request or PutObject for single part uploads - --s3-v2-auth If true use v2 authentication - --s3-version-at Time Show file versions as they were at the specified time (default off) - --s3-versions Include old versions in directory listings - --seafile-2fa Two-factor authentication (\[aq]true\[aq] if the account has 2FA enabled) - --seafile-create-library Should rclone create a library if it doesn\[aq]t exist - --seafile-encoding MultiEncoder The encoding for the backend (default Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8) - --seafile-library string Name of the library - --seafile-library-key string Library password (for encrypted libraries only) (obscured) - --seafile-pass string Password (obscured) - --seafile-url string URL of seafile host to connect to - --seafile-user string User name (usually email address) - --sftp-ask-password 
Allow asking for SFTP password when needed - --sftp-chunk-size SizeSuffix Upload and download chunk size (default 32Ki) - --sftp-concurrency int The maximum number of outstanding requests for one file (default 64) - --sftp-disable-concurrent-reads If set don\[aq]t use concurrent reads - --sftp-disable-concurrent-writes If set don\[aq]t use concurrent writes - --sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available - --sftp-host string SSH host to connect to - --sftp-idle-timeout Duration Max time before closing idle connections (default 1m0s) - --sftp-key-file string Path to PEM-encoded private key file - --sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file (obscured) - --sftp-key-pem string Raw PEM-encoded private key - --sftp-key-use-agent When set forces the usage of the ssh-agent - --sftp-known-hosts-file string Optional path to known_hosts file - --sftp-md5sum-command string The command used to read md5 hashes - --sftp-pass string SSH password, leave blank to use ssh-agent (obscured) - --sftp-path-override string Override path used by SSH shell commands - --sftp-port int SSH port number (default 22) - --sftp-pubkey-file string Optional path to public key file - --sftp-server-command string Specifies the path or command to run a sftp server on the remote host - --sftp-set-env SpaceSepList Environment variables to pass to sftp and commands - --sftp-set-modtime Set the modified time on the remote if set (default true) - --sftp-sha1sum-command string The command used to read sha1 hashes - --sftp-shell-type string The type of SSH shell on remote server, if any - --sftp-skip-links Set to skip any symlinks and any other non regular files - --sftp-subsystem string Specifies the SSH2 subsystem on the remote host (default \[dq]sftp\[dq]) - --sftp-use-fstat If set use fstat instead of stat - --sftp-use-insecure-cipher Enable the use of insecure ciphers and key exchange methods - 
--sftp-user string SSH username (default \[dq]$USER\[dq]) - --sharefile-chunk-size SizeSuffix Upload chunk size (default 64Mi) - --sharefile-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,LeftPeriod,RightSpace,RightPeriod,InvalidUtf8,Dot) - --sharefile-endpoint string Endpoint for API calls - --sharefile-root-folder-id string ID of the root folder - --sharefile-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (default 128Mi) - --sia-api-password string Sia Daemon API Password (obscured) - --sia-api-url string Sia daemon API URL, like http://sia.daemon.host:9980 (default \[dq]http://127.0.0.1:9980\[dq]) - --sia-encoding MultiEncoder The encoding for the backend (default Slash,Question,Hash,Percent,Del,Ctl,InvalidUtf8,Dot) - --sia-user-agent string Siad User Agent (default \[dq]Sia-Agent\[dq]) - --skip-links Don\[aq]t warn about skipped symlinks - --smb-case-insensitive Whether the server is configured to be case-insensitive (default true) - --smb-domain string Domain name for NTLM authentication (default \[dq]WORKGROUP\[dq]) - --smb-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot) - --smb-hide-special-share Hide special shares (e.g. 
print$) which users aren\[aq]t supposed to access (default true) - --smb-host string SMB server hostname to connect to - --smb-idle-timeout Duration Max time before closing idle connections (default 1m0s) - --smb-pass string SMB password (obscured) - --smb-port int SMB port number (default 445) - --smb-user string SMB username (default \[dq]$USER\[dq]) - --storj-access-grant string Access grant - --storj-api-key string API key - --storj-passphrase string Encryption passphrase - --storj-provider string Choose an authentication method (default \[dq]existing\[dq]) - --storj-satellite-address string Satellite address (default \[dq]us-central-1.storj.io\[dq]) - --sugarsync-access-key-id string Sugarsync Access Key ID - --sugarsync-app-id string Sugarsync App ID - --sugarsync-authorization string Sugarsync authorization - --sugarsync-authorization-expiry string Sugarsync authorization expiry - --sugarsync-deleted-id string Sugarsync deleted folder id - --sugarsync-encoding MultiEncoder The encoding for the backend (default Slash,Ctl,InvalidUtf8,Dot) - --sugarsync-hard-delete Permanently delete files if true - --sugarsync-private-access-key string Sugarsync Private Access Key - --sugarsync-refresh-token string Sugarsync refresh token - --sugarsync-root-id string Sugarsync root id - --sugarsync-user string Sugarsync user - --swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID) - --swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME) - --swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET) - --swift-auth string Authentication URL for server (OS_AUTH_URL) - --swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN) - --swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION) - --swift-chunk-size SizeSuffix Above this size files will be 
chunked into a _segments container (default 5Gi) - --swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME) - --swift-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8) - --swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default \[dq]public\[dq]) - --swift-env-auth Get swift credentials from environment variables in standard OpenStack form - --swift-key string API key or password (OS_PASSWORD) - --swift-leave-parts-on-error If true avoid calling abort upload on a failure - --swift-no-chunk Don\[aq]t chunk files during streaming upload - --swift-no-large-objects Disable support for static and dynamic large objects - --swift-region string Region name - optional (OS_REGION_NAME) - --swift-storage-policy string The storage policy to use when creating a new container - --swift-storage-url string Storage URL - optional (OS_STORAGE_URL) - --swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME) - --swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME) - --swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID) - --swift-user string User name to log in (OS_USERNAME) - --swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID) - --union-action-policy string Policy to choose upstream on ACTION category (default \[dq]epall\[dq]) - --union-cache-time int Cache time of usage and free space (in seconds) (default 120) - --union-create-policy string Policy to choose upstream on CREATE category (default \[dq]epmfs\[dq]) - --union-min-free-space SizeSuffix Minimum viable free space for lfs/eplfs policies (default 1Gi) - --union-search-policy string Policy to choose upstream on SEARCH category (default \[dq]ff\[dq]) - --union-upstreams string List of space separated 
upstreams - --uptobox-access-token string Your access token - --uptobox-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot) - --webdav-bearer-token string Bearer token instead of user/pass (e.g. a Macaroon) - --webdav-bearer-token-command string Command to run to get a bearer token - --webdav-encoding string The encoding for the backend - --webdav-headers CommaSepList Set HTTP headers for all transactions - --webdav-pass string Password (obscured) - --webdav-url string URL of http host to connect to - --webdav-user string User name - --webdav-vendor string Name of the WebDAV site/service/software you are using - --yandex-auth-url string Auth server URL - --yandex-client-id string OAuth Client Id - --yandex-client-secret string OAuth Client Secret - --yandex-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot) - --yandex-hard-delete Delete files permanently rather than putting them into the trash - --yandex-token string OAuth Access Token as a JSON blob - --yandex-token-url string Token server url - --zoho-auth-url string Auth server URL - --zoho-client-id string OAuth Client Id - --zoho-client-secret string OAuth Client Secret - --zoho-encoding MultiEncoder The encoding for the backend (default Del,Ctl,InvalidUtf8) - --zoho-region string Zoho region to connect to - --zoho-token string OAuth Access Token as a JSON blob - --zoho-token-url string Token server url + --acd-auth-url string Auth server URL + --acd-client-id string OAuth Client Id + --acd-client-secret string OAuth Client Secret + --acd-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot) + --acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink (default 9Gi) + --acd-token string OAuth Access Token as a JSON blob + --acd-token-url string Token server url + --acd-upload-wait-per-gb Duration Additional time per GiB to wait after a 
failed complete upload to see if it appears (default 3m0s) + --alias-remote string Remote or path to alias + --azureblob-access-tier string Access tier of blob: hot, cool or archive + --azureblob-account string Azure Storage Account Name + --azureblob-archive-tier-delete Delete archive tier blobs before overwriting + --azureblob-chunk-size SizeSuffix Upload chunk size (default 4Mi) + --azureblob-client-certificate-password string Password for the certificate file (optional) (obscured) + --azureblob-client-certificate-path string Path to a PEM or PKCS12 certificate file including the private key + --azureblob-client-id string The ID of the client in use + --azureblob-client-secret string One of the service principal\[aq]s client secrets + --azureblob-client-send-certificate-chain Send the certificate chain when using certificate auth + --azureblob-disable-checksum Don\[aq]t store MD5 checksum with object metadata + --azureblob-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8) + --azureblob-endpoint string Endpoint for the service + --azureblob-env-auth Read credentials from runtime (environment variables, CLI or MSI) + --azureblob-key string Storage Account Shared Key + --azureblob-list-chunk int Size of blob list (default 5000) + --azureblob-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s) + --azureblob-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool + --azureblob-msi-client-id string Object ID of the user-assigned MSI to use, if any + --azureblob-msi-mi-res-id string Azure resource ID of the user-assigned MSI to use, if any + --azureblob-msi-object-id string Object ID of the user-assigned MSI to use, if any + --azureblob-no-check-container If set, don\[aq]t attempt to check the container exists or create it + --azureblob-no-head-object If set, do not do HEAD before GET when getting objects + --azureblob-password string The 
user\[aq]s password (obscured) + --azureblob-public-access string Public access level of a container: blob or container + --azureblob-sas-url string SAS URL for container level access only + --azureblob-service-principal-file string Path to file containing credentials for use with a service principal + --azureblob-tenant string ID of the service principal\[aq]s tenant. Also called its directory ID + --azureblob-upload-concurrency int Concurrency for multipart uploads (default 16) + --azureblob-upload-cutoff string Cutoff for switching to chunked upload (<= 256 MiB) (deprecated) + --azureblob-use-emulator Uses local storage emulator if provided as \[aq]true\[aq] + --azureblob-use-msi Use a managed service identity to authenticate (only works in Azure) + --azureblob-username string User name (usually an email address) + --b2-account string Account ID or Application Key ID + --b2-chunk-size SizeSuffix Upload chunk size (default 96Mi) + --b2-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4Gi) + --b2-disable-checksum Disable checksums for large (> upload cutoff) files + --b2-download-auth-duration Duration Time before the authorization token will expire in s or suffix ms|s|m|h|d (default 1w) + --b2-download-url string Custom endpoint for downloads + --b2-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot) + --b2-endpoint string Endpoint for the service + --b2-hard-delete Permanently delete files on remote removal, otherwise hide files + --b2-key string Application Key + --b2-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s) + --b2-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool + --b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging + --b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi) + --b2-version-at Time Show file versions as they were at the specified time 
(default off) + --b2-versions Include old versions in directory listings + --box-access-token string Box App Primary Access Token + --box-auth-url string Auth server URL + --box-box-config-file string Box App config.json location + --box-box-sub-type string (default \[dq]user\[dq]) + --box-client-id string OAuth Client Id + --box-client-secret string OAuth Client Secret + --box-commit-retries int Max number of times to try committing a multipart file (default 100) + --box-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot) + --box-list-chunk int Size of listing chunk 1-1000 (default 1000) + --box-owned-by string Only show items owned by the login (email address) passed in + --box-root-folder-id string Fill in for rclone to use a non root folder as its starting point + --box-token string OAuth Access Token as a JSON blob + --box-token-url string Token server url + --box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50 MiB) (default 50Mi) + --cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage (default 1m0s) + --cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming + --cache-chunk-path string Directory to cache chunk files (default \[dq]$HOME/.cache/rclone/cache-backend\[dq]) + --cache-chunk-size SizeSuffix The size of a chunk (partial file data) (default 5Mi) + --cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk (default 10Gi) + --cache-db-path string Directory to store file structure metadata DB (default \[dq]$HOME/.cache/rclone/cache-backend\[dq]) + --cache-db-purge Clear all the cached data for this remote on start + --cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s) + --cache-info-age Duration How long to cache file structure information (directory listings, file size, times, etc.) 
(default 6h0m0s) + --cache-plex-insecure string Skip all certificate verification when connecting to the Plex server + --cache-plex-password string The password of the Plex user (obscured) + --cache-plex-url string The URL of the Plex server + --cache-plex-username string The username of the Plex user + --cache-read-retries int How many times to retry a read from a cache storage (default 10) + --cache-remote string Remote to cache + --cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1) + --cache-tmp-upload-path string Directory to keep temporary files until they are uploaded + --cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s) + --cache-workers int How many workers should run in parallel to download chunks (default 4) + --cache-writes Cache file data on writes through the FS + --chunker-chunk-size SizeSuffix Files larger than chunk size will be split in chunks (default 2Gi) + --chunker-fail-hard Choose how chunker should handle files with missing or invalid chunks + --chunker-hash-type string Choose how chunker handles hash sums (default \[dq]md5\[dq]) + --chunker-remote string Remote to chunk/unchunk + --combine-upstreams SpaceSepList Upstreams for combining + --compress-level int GZIP compression level (-2 to 9) (default -1) + --compress-mode string Compression mode (default \[dq]gzip\[dq]) + --compress-ram-cache-limit SizeSuffix Some remotes don\[aq]t allow the upload of files with unknown size (default 20Mi) + --compress-remote string Remote to compress + -L, --copy-links Follow symlinks and copy the pointed to item + --crypt-directory-name-encryption Option to either encrypt directory names or leave them intact (default true) + --crypt-filename-encoding string How to encode the encrypted filename to text string (default \[dq]base32\[dq]) + --crypt-filename-encryption string How to encrypt the filenames (default \[dq]standard\[dq]) + 
--crypt-no-data-encryption Option to either encrypt file data or leave it unencrypted + --crypt-password string Password or pass phrase for encryption (obscured) + --crypt-password2 string Password or pass phrase for salt (obscured) + --crypt-remote string Remote to encrypt/decrypt + --crypt-server-side-across-configs Allow server-side operations (e.g. copy) to work across different crypt configs + --crypt-show-mapping For all files listed show how the names encrypt + --drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded + --drive-allow-import-name-change Allow the filetype to change when uploading Google docs + --drive-auth-owner-only Only consider files owned by the authenticated user + --drive-auth-url string Auth server URL + --drive-chunk-size SizeSuffix Upload chunk size (default 8Mi) + --drive-client-id string Google Application Client Id + --drive-client-secret string OAuth Client Secret + --drive-copy-shortcut-content Server side copy contents of shortcuts instead of the shortcut + --drive-disable-http2 Disable drive using http2 (default true) + --drive-encoding MultiEncoder The encoding for the backend (default InvalidUtf8) + --drive-export-formats string Comma separated list of preferred formats for downloading Google docs (default \[dq]docx,xlsx,pptx,svg\[dq]) + --drive-formats string Deprecated: See export_formats + --drive-impersonate string Impersonate this user when using a service account + --drive-import-formats string Comma separated list of preferred formats for uploading Google docs + --drive-keep-revision-forever Keep new head revision of each file forever + --drive-list-chunk int Size of listing chunk 100-1000, 0 to disable (default 1000) + --drive-pacer-burst int Number of API calls to allow without sleeping (default 100) + --drive-pacer-min-sleep Duration Minimum time to sleep between API calls (default 100ms) + --drive-resource-key string Resource key for accessing a link-shared file + 
--drive-root-folder-id string ID of the root folder + --drive-scope string Scope that rclone should use when requesting access from drive + --drive-server-side-across-configs Allow server-side operations (e.g. copy) to work across different drive configs + --drive-service-account-credentials string Service Account Credentials JSON blob + --drive-service-account-file string Service Account Credentials JSON file path + --drive-shared-with-me Only show files that are shared with me + --drive-size-as-quota Show sizes as storage quota usage, not actual size + --drive-skip-checksum-gphotos Skip MD5 checksum on Google photos and videos only + --drive-skip-dangling-shortcuts If set skip dangling shortcut files + --drive-skip-gdocs Skip google documents in all listings + --drive-skip-shortcuts If set skip shortcut files + --drive-starred-only Only show files that are starred + --drive-stop-on-download-limit Make download limit errors be fatal + --drive-stop-on-upload-limit Make upload limit errors be fatal + --drive-team-drive string ID of the Shared Drive (Team Drive) + --drive-token string OAuth Access Token as a JSON blob + --drive-token-url string Token server url + --drive-trashed-only Only show files that are in the trash + --drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8Mi) + --drive-use-created-date Use file created date instead of modified date + --drive-use-shared-date Use date file was shared instead of modified date + --drive-use-trash Send files to the trash instead of deleting permanently (default true) + --drive-v2-download-min-size SizeSuffix If Object\[aq]s are greater, use drive v2 API to download (default off) + --dropbox-auth-url string Auth server URL + --dropbox-batch-commit-timeout Duration Max time to wait for a batch to finish committing (default 10m0s) + --dropbox-batch-mode string Upload file batching sync|async|off (default \[dq]sync\[dq]) + --dropbox-batch-size int Max number of files in upload batch + 
--dropbox-batch-timeout Duration Max time to allow an idle upload batch before uploading (default 0s) + --dropbox-chunk-size SizeSuffix Upload chunk size (< 150Mi) (default 48Mi) + --dropbox-client-id string OAuth Client Id + --dropbox-client-secret string OAuth Client Secret + --dropbox-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot) + --dropbox-impersonate string Impersonate this user when using a business account + --dropbox-shared-files Instructs rclone to work on individual shared files + --dropbox-shared-folders Instructs rclone to work on shared folders + --dropbox-token string OAuth Access Token as a JSON blob + --dropbox-token-url string Token server url + --fichier-api-key string Your API Key, get it from https://1fichier.com/console/params.pl + --fichier-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot) + --fichier-file-password string If you want to download a shared file that is password protected, add this parameter (obscured) + --fichier-folder-password string If you want to list the files in a shared folder that is password protected, add this parameter (obscured) + --fichier-shared-folder string If you want to download a shared folder, add this parameter + --filefabric-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot) + --filefabric-permanent-token string Permanent Authentication Token + --filefabric-root-folder-id string ID of the root folder + --filefabric-token string Session Token + --filefabric-token-expiry string Token expiry time + --filefabric-url string URL of the Enterprise File Fabric to connect to + --filefabric-version string Version read from the file fabric + --ftp-ask-password Allow asking for FTP password when needed + --ftp-close-timeout Duration Maximum time to wait for a response to close (default 1m0s) + 
--ftp-concurrency int Maximum number of FTP simultaneous connections, 0 for unlimited + --ftp-disable-epsv Disable using EPSV even if server advertises support + --ftp-disable-mlsd Disable using MLSD even if server advertises support + --ftp-disable-tls13 Disable TLS 1.3 (workaround for FTP servers with buggy TLS) + --ftp-disable-utf8 Disable using UTF-8 even if server advertises support + --ftp-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,RightSpace,Dot) + --ftp-explicit-tls Use Explicit FTPS (FTP over TLS) + --ftp-force-list-hidden Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD + --ftp-host string FTP host to connect to + --ftp-idle-timeout Duration Max time before closing idle connections (default 1m0s) + --ftp-no-check-certificate Do not verify the TLS certificate of the server + --ftp-pass string FTP password (obscured) + --ftp-port int FTP port number (default 21) + --ftp-shut-timeout Duration Maximum time to wait for data connection closing status (default 1m0s) + --ftp-tls Use Implicit FTPS (FTP over TLS) + --ftp-tls-cache-size int Size of TLS session cache for all control and data connections (default 32) + --ftp-user string FTP username (default \[dq]$USER\[dq]) + --ftp-writing-mdtm Use MDTM to set modification time (VsFtpd quirk) + --gcs-anonymous Access public buckets and objects without credentials + --gcs-auth-url string Auth server URL + --gcs-bucket-acl string Access Control List for new buckets + --gcs-bucket-policy-only Access checks should use bucket-level IAM policies + --gcs-client-id string OAuth Client Id + --gcs-client-secret string OAuth Client Secret + --gcs-decompress If set this will decompress gzip encoded objects + --gcs-encoding MultiEncoder The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot) + --gcs-endpoint string Endpoint for the service + --gcs-location string Location for the newly created buckets + --gcs-no-check-bucket If set, don\[aq]t 
attempt to check the bucket exists or create it + --gcs-object-acl string Access Control List for new objects + --gcs-project-number string Project number + --gcs-service-account-file string Service Account Credentials JSON file path + --gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage + --gcs-token string OAuth Access Token as a JSON blob + --gcs-token-url string Token server url + --gphotos-auth-url string Auth server URL + --gphotos-client-id string OAuth Client Id + --gphotos-client-secret string OAuth Client Secret + --gphotos-encoding MultiEncoder The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot) + --gphotos-include-archived Also view and download archived media + --gphotos-read-only Set to make the Google Photos backend read only + --gphotos-read-size Set to read the size of media items + --gphotos-start-year int Year limits the photos to be downloaded to those which are uploaded after the given year (default 2000) + --gphotos-token string OAuth Access Token as a JSON blob + --gphotos-token-url string Token server url + --hasher-auto-size SizeSuffix Auto-update checksum for files smaller than this size (disabled by default) + --hasher-hashes CommaSepList Comma separated list of supported checksum types (default md5,sha1) + --hasher-max-age Duration Maximum time to keep checksums in cache (0 = no cache, off = cache forever) (default off) + --hasher-remote string Remote to cache checksums for (e.g. 
myRemote:path) + --hdfs-data-transfer-protection string Kerberos data transfer protection: authentication|integrity|privacy + --hdfs-encoding MultiEncoder The encoding for the backend (default Slash,Colon,Del,Ctl,InvalidUtf8,Dot) + --hdfs-namenode string Hadoop name node and port + --hdfs-service-principal-name string Kerberos service principal name for the namenode + --hdfs-username string Hadoop user name + --hidrive-auth-url string Auth server URL + --hidrive-chunk-size SizeSuffix Chunksize for chunked uploads (default 48Mi) + --hidrive-client-id string OAuth Client Id + --hidrive-client-secret string OAuth Client Secret + --hidrive-disable-fetching-member-count Do not fetch number of objects in directories unless it is absolutely necessary + --hidrive-encoding MultiEncoder The encoding for the backend (default Slash,Dot) + --hidrive-endpoint string Endpoint for the service (default \[dq]https://api.hidrive.strato.com/2.1\[dq]) + --hidrive-root-prefix string The root/parent folder for all paths (default \[dq]/\[dq]) + --hidrive-scope-access string Access permissions that rclone should use when requesting access from HiDrive (default \[dq]rw\[dq]) + --hidrive-scope-role string User-level that rclone should use when requesting access from HiDrive (default \[dq]user\[dq]) + --hidrive-token string OAuth Access Token as a JSON blob + --hidrive-token-url string Token server url + --hidrive-upload-concurrency int Concurrency for chunked uploads (default 4) + --hidrive-upload-cutoff SizeSuffix Cutoff/Threshold for chunked uploads (default 96Mi) + --http-headers CommaSepList Set HTTP headers for all transactions + --http-no-head Don\[aq]t use HEAD requests + --http-no-slash Set this if the site doesn\[aq]t end directories with / + --http-url string URL of HTTP host to connect to + --internetarchive-access-key-id string IAS3 Access Key + --internetarchive-disable-checksum Don\[aq]t ask the server to test against MD5 checksum calculated by rclone (default true) + 
--internetarchive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,CrLf,Del,Ctl,InvalidUtf8,Dot) + --internetarchive-endpoint string IAS3 Endpoint (default \[dq]https://s3.us.archive.org\[dq]) + --internetarchive-front-endpoint string Host of InternetArchive Frontend (default \[dq]https://archive.org\[dq]) + --internetarchive-secret-access-key string IAS3 Secret Key (password) + --internetarchive-wait-archive Duration Timeout for waiting the server\[aq]s processing tasks (specifically archive and book_op) to finish (default 0s) + --jottacloud-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot) + --jottacloud-hard-delete Delete files permanently rather than putting them into the trash + --jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required (default 10Mi) + --jottacloud-no-versions Avoid server side versioning by deleting files and recreating files instead of overwriting them + --jottacloud-trashed-only Only show files that are in the trash + --jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fail\[aq]s (default 10Mi) + --koofr-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot) + --koofr-endpoint string The Koofr API endpoint to use + --koofr-mountid string Mount ID of the mount to use + --koofr-password string Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password) (obscured) + --koofr-provider string Choose your storage provider + --koofr-setmtime Does the backend support setting modification time (default true) + --koofr-user string Your user name + -l, --links Translate symlinks to/from regular files with a \[aq].rclonelink\[aq] extension + --local-case-insensitive Force the filesystem to report itself as case insensitive + --local-case-sensitive Force the 
filesystem to report itself as case sensitive + --local-encoding MultiEncoder The encoding for the backend (default Slash,Dot) + --local-no-check-updated Don\[aq]t check to see if the files change during upload + --local-no-preallocate Disable preallocation of disk space for transferred files + --local-no-set-modtime Disable setting modtime + --local-no-sparse Disable sparse files for multi-thread downloads + --local-nounc Disable UNC (long path names) conversion on Windows + --local-unicode-normalization Apply unicode NFC normalization to paths and filenames + --local-zero-size-links Assume the Stat size of links is zero (and read them instead) (deprecated) + --mailru-check-hash What should copy do if file checksum is mismatched or invalid (default true) + --mailru-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot) + --mailru-pass string Password (obscured) + --mailru-speedup-enable Skip full upload if there is another file with same data hash (default true) + --mailru-speedup-file-patterns string Comma separated list of file name patterns eligible for speedup (put by hash) (default \[dq]*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf\[dq]) + --mailru-speedup-max-disk SizeSuffix This option allows you to disable speedup (put by hash) for large files (default 3Gi) + --mailru-speedup-max-memory SizeSuffix Files larger than the size given below will always be hashed on disk (default 32Mi) + --mailru-user string User name (usually email) + --mega-debug Output more debug from Mega + --mega-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot) + --mega-hard-delete Delete files permanently rather than putting them into the trash + --mega-pass string Password (obscured) + --mega-user string User name + --netstorage-account string Set the NetStorage account name + --netstorage-host string Domain+path of NetStorage host to connect to + 
--netstorage-protocol string Select between HTTP or HTTPS protocol (default \[dq]https\[dq]) + --netstorage-secret string Set the NetStorage account secret/G2O key for authentication (obscured) + -x, --one-file-system Don\[aq]t cross filesystem boundaries (unix/macOS only) + --onedrive-access-scopes SpaceSepList Set scopes to be requested by rclone (default Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access) + --onedrive-auth-url string Auth server URL + --onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k (327,680 bytes) (default 10Mi) + --onedrive-client-id string OAuth Client Id + --onedrive-client-secret string OAuth Client Secret + --onedrive-drive-id string The ID of the drive to use + --onedrive-drive-type string The type of the drive (personal | business | documentLibrary) + --onedrive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot) + --onedrive-expose-onenote-files Set to make OneNote files show up in directory listings + --onedrive-link-password string Set the password for links created by the link command + --onedrive-link-scope string Set the scope of the links created by the link command (default \[dq]anonymous\[dq]) + --onedrive-link-type string Set the type of the links created by the link command (default \[dq]view\[dq]) + --onedrive-list-chunk int Size of listing chunk (default 1000) + --onedrive-no-versions Remove all versions on modifying operations + --onedrive-region string Choose national cloud region for OneDrive (default \[dq]global\[dq]) + --onedrive-root-folder-id string ID of the root folder + --onedrive-server-side-across-configs Allow server-side operations (e.g. 
copy) to work across different onedrive configs + --onedrive-token string OAuth Access Token as a JSON blob + --onedrive-token-url string Token server url + --oos-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi) + --oos-compartment string Object storage compartment OCID + --oos-config-file string Path to OCI config file (default \[dq]\[ti]/.oci/config\[dq]) + --oos-config-profile string Profile name inside the oci config file (default \[dq]Default\[dq]) + --oos-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi) + --oos-copy-timeout Duration Timeout for copy (default 1m0s) + --oos-disable-checksum Don\[aq]t store MD5 checksum with object metadata + --oos-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot) + --oos-endpoint string Endpoint for Object storage API + --oos-leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery + --oos-namespace string Object storage namespace + --oos-no-check-bucket If set, don\[aq]t attempt to check the bucket exists or create it + --oos-provider string Choose your Auth Provider (default \[dq]env_auth\[dq]) + --oos-region string Object storage Region + --oos-upload-concurrency int Concurrency for multipart uploads (default 10) + --oos-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi) + --opendrive-chunk-size SizeSuffix Files will be uploaded in chunks this size (default 10Mi) + --opendrive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot) + --opendrive-password string Password (obscured) + --opendrive-username string Username + --pcloud-auth-url string Auth server URL + --pcloud-client-id string OAuth Client Id + --pcloud-client-secret string OAuth Client Secret + --pcloud-encoding MultiEncoder The encoding for the 
backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot) + --pcloud-hostname string Hostname to connect to (default \[dq]api.pcloud.com\[dq]) + --pcloud-password string Your pcloud password (obscured) + --pcloud-root-folder-id string Fill in for rclone to use a non root folder as its starting point (default \[dq]d0\[dq]) + --pcloud-token string OAuth Access Token as a JSON blob + --pcloud-token-url string Token server url + --pcloud-username string Your pcloud username + --premiumizeme-encoding MultiEncoder The encoding for the backend (default Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot) + --putio-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot) + --qingstor-access-key-id string QingStor Access Key ID + --qingstor-chunk-size SizeSuffix Chunk size to use for uploading (default 4Mi) + --qingstor-connection-retries int Number of connection retries (default 3) + --qingstor-encoding MultiEncoder The encoding for the backend (default Slash,Ctl,InvalidUtf8) + --qingstor-endpoint string Enter an endpoint URL to connection QingStor API + --qingstor-env-auth Get QingStor credentials from runtime + --qingstor-secret-access-key string QingStor Secret Access Key (password) + --qingstor-upload-concurrency int Concurrency for multipart uploads (default 1) + --qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi) + --qingstor-zone string Zone to connect to + --s3-access-key-id string AWS Access Key ID + --s3-acl string Canned ACL used when creating buckets and storing or copying objects + --s3-bucket-acl string Canned ACL used when creating buckets + --s3-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi) + --s3-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi) + --s3-decompress If set this will decompress gzip encoded objects + --s3-disable-checksum Don\[aq]t store MD5 checksum with object metadata + --s3-disable-http2 Disable usage of 
http2 for S3 backends + --s3-download-url string Custom endpoint for downloads + --s3-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot) + --s3-endpoint string Endpoint for S3 API + --s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars) + --s3-force-path-style If true use path style access if false use virtual hosted style (default true) + --s3-leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery + --s3-list-chunk int Size of listing chunk (response list for each ListObject S3 request) (default 1000) + --s3-list-url-encode Tristate Whether to url encode listings: true/false/unset (default unset) + --s3-list-version int Version of ListObjects to use: 1,2 or 0 for auto + --s3-location-constraint string Location constraint - must be set to match the Region + --s3-max-upload-parts int Maximum number of parts in a multipart upload (default 10000) + --s3-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s) + --s3-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool + --s3-might-gzip Tristate Set this if the backend might gzip objects (default unset) + --s3-no-check-bucket If set, don\[aq]t attempt to check the bucket exists or create it + --s3-no-head If set, don\[aq]t HEAD uploaded objects to check integrity + --s3-no-head-object If set, do not do HEAD before GET when getting objects + --s3-no-system-metadata Suppress setting and reading of system metadata + --s3-profile string Profile to use in the shared credentials file + --s3-provider string Choose your S3 provider + --s3-region string Region to connect to + --s3-requester-pays Enables requester pays option when interacting with S3 bucket + --s3-secret-access-key string AWS Secret Access Key (password) + --s3-server-side-encryption string The server-side encryption algorithm used when 
storing this object in S3 + --s3-session-token string An AWS session token + --s3-shared-credentials-file string Path to the shared credentials file + --s3-sse-customer-algorithm string If using SSE-C, the server-side encryption algorithm used when storing this object in S3 + --s3-sse-customer-key string To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data + --s3-sse-customer-key-base64 string If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data + --s3-sse-customer-key-md5 string If using SSE-C you may provide the secret encryption key MD5 checksum (optional) + --s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key + --s3-storage-class string The storage class to use when storing new objects in S3 + --s3-upload-concurrency int Concurrency for multipart uploads (default 4) + --s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi) + --s3-use-accelerate-endpoint If true use the AWS S3 accelerated endpoint + --s3-use-multipart-etag Tristate Whether to use ETag in multipart uploads for verification (default unset) + --s3-use-presigned-request Whether to use a presigned request or PutObject for single part uploads + --s3-v2-auth If true use v2 authentication + --s3-version-at Time Show file versions as they were at the specified time (default off) + --s3-versions Include old versions in directory listings + --seafile-2fa Two-factor authentication (\[aq]true\[aq] if the account has 2FA enabled) + --seafile-create-library Should rclone create a library if it doesn\[aq]t exist + --seafile-encoding MultiEncoder The encoding for the backend (default Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8) + --seafile-library string Name of the library + --seafile-library-key string Library password (for encrypted libraries only) (obscured) + --seafile-pass string Password (obscured) + --seafile-url string URL of seafile host to connect to + 
--seafile-user string User name (usually email address) + --sftp-ask-password Allow asking for SFTP password when needed + --sftp-chunk-size SizeSuffix Upload and download chunk size (default 32Ki) + --sftp-ciphers SpaceSepList Space separated list of ciphers to be used for session encryption, ordered by preference + --sftp-concurrency int The maximum number of outstanding requests for one file (default 64) + --sftp-disable-concurrent-reads If set don\[aq]t use concurrent reads + --sftp-disable-concurrent-writes If set don\[aq]t use concurrent writes + --sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available + --sftp-host string SSH host to connect to + --sftp-idle-timeout Duration Max time before closing idle connections (default 1m0s) + --sftp-key-exchange SpaceSepList Space separated list of key exchange algorithms, ordered by preference + --sftp-key-file string Path to PEM-encoded private key file + --sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file (obscured) + --sftp-key-pem string Raw PEM-encoded private key + --sftp-key-use-agent When set forces the usage of the ssh-agent + --sftp-known-hosts-file string Optional path to known_hosts file + --sftp-macs SpaceSepList Space separated list of MACs (message authentication code) algorithms, ordered by preference + --sftp-md5sum-command string The command used to read md5 hashes + --sftp-pass string SSH password, leave blank to use ssh-agent (obscured) + --sftp-path-override string Override path used by SSH shell commands + --sftp-port int SSH port number (default 22) + --sftp-pubkey-file string Optional path to public key file + --sftp-server-command string Specifies the path or command to run a sftp server on the remote host + --sftp-set-env SpaceSepList Environment variables to pass to sftp and commands + --sftp-set-modtime Set the modified time on the remote if set (default true) + --sftp-sha1sum-command string The 
command used to read sha1 hashes + --sftp-shell-type string The type of SSH shell on remote server, if any + --sftp-skip-links Set to skip any symlinks and any other non regular files + --sftp-subsystem string Specifies the SSH2 subsystem on the remote host (default \[dq]sftp\[dq]) + --sftp-use-fstat If set use fstat instead of stat + --sftp-use-insecure-cipher Enable the use of insecure ciphers and key exchange methods + --sftp-user string SSH username (default \[dq]$USER\[dq]) + --sharefile-chunk-size SizeSuffix Upload chunk size (default 64Mi) + --sharefile-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,LeftPeriod,RightSpace,RightPeriod,InvalidUtf8,Dot) + --sharefile-endpoint string Endpoint for API calls + --sharefile-root-folder-id string ID of the root folder + --sharefile-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (default 128Mi) + --sia-api-password string Sia Daemon API Password (obscured) + --sia-api-url string Sia daemon API URL, like http://sia.daemon.host:9980 (default \[dq]http://127.0.0.1:9980\[dq]) + --sia-encoding MultiEncoder The encoding for the backend (default Slash,Question,Hash,Percent,Del,Ctl,InvalidUtf8,Dot) + --sia-user-agent string Siad User Agent (default \[dq]Sia-Agent\[dq]) + --skip-links Don\[aq]t warn about skipped symlinks + --smb-case-insensitive Whether the server is configured to be case-insensitive (default true) + --smb-domain string Domain name for NTLM authentication (default \[dq]WORKGROUP\[dq]) + --smb-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot) + --smb-hide-special-share Hide special shares (e.g. 
print$) which users aren\[aq]t supposed to access (default true) + --smb-host string SMB server hostname to connect to + --smb-idle-timeout Duration Max time before closing idle connections (default 1m0s) + --smb-pass string SMB password (obscured) + --smb-port int SMB port number (default 445) + --smb-user string SMB username (default \[dq]$USER\[dq]) + --storj-access-grant string Access grant + --storj-api-key string API key + --storj-passphrase string Encryption passphrase + --storj-provider string Choose an authentication method (default \[dq]existing\[dq]) + --storj-satellite-address string Satellite address (default \[dq]us-central-1.storj.io\[dq]) + --sugarsync-access-key-id string Sugarsync Access Key ID + --sugarsync-app-id string Sugarsync App ID + --sugarsync-authorization string Sugarsync authorization + --sugarsync-authorization-expiry string Sugarsync authorization expiry + --sugarsync-deleted-id string Sugarsync deleted folder id + --sugarsync-encoding MultiEncoder The encoding for the backend (default Slash,Ctl,InvalidUtf8,Dot) + --sugarsync-hard-delete Permanently delete files if true + --sugarsync-private-access-key string Sugarsync Private Access Key + --sugarsync-refresh-token string Sugarsync refresh token + --sugarsync-root-id string Sugarsync root id + --sugarsync-user string Sugarsync user + --swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID) + --swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME) + --swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET) + --swift-auth string Authentication URL for server (OS_AUTH_URL) + --swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN) + --swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION) + --swift-chunk-size SizeSuffix Above this size files will be 
chunked into a _segments container (default 5Gi) + --swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME) + --swift-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8) + --swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default \[dq]public\[dq]) + --swift-env-auth Get swift credentials from environment variables in standard OpenStack form + --swift-key string API key or password (OS_PASSWORD) + --swift-leave-parts-on-error If true avoid calling abort upload on a failure + --swift-no-chunk Don\[aq]t chunk files during streaming upload + --swift-no-large-objects Disable support for static and dynamic large objects + --swift-region string Region name - optional (OS_REGION_NAME) + --swift-storage-policy string The storage policy to use when creating a new container + --swift-storage-url string Storage URL - optional (OS_STORAGE_URL) + --swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME) + --swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME) + --swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID) + --swift-user string User name to log in (OS_USERNAME) + --swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID) + --union-action-policy string Policy to choose upstream on ACTION category (default \[dq]epall\[dq]) + --union-cache-time int Cache time of usage and free space (in seconds) (default 120) + --union-create-policy string Policy to choose upstream on CREATE category (default \[dq]epmfs\[dq]) + --union-min-free-space SizeSuffix Minimum viable free space for lfs/eplfs policies (default 1Gi) + --union-search-policy string Policy to choose upstream on SEARCH category (default \[dq]ff\[dq]) + --union-upstreams string List of space separated 
upstreams + --uptobox-access-token string Your access token + --uptobox-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot) + --webdav-bearer-token string Bearer token instead of user/pass (e.g. a Macaroon) + --webdav-bearer-token-command string Command to run to get a bearer token + --webdav-encoding string The encoding for the backend + --webdav-headers CommaSepList Set HTTP headers for all transactions + --webdav-pass string Password (obscured) + --webdav-url string URL of http host to connect to + --webdav-user string User name + --webdav-vendor string Name of the WebDAV site/service/software you are using + --yandex-auth-url string Auth server URL + --yandex-client-id string OAuth Client Id + --yandex-client-secret string OAuth Client Secret + --yandex-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot) + --yandex-hard-delete Delete files permanently rather than putting them into the trash + --yandex-token string OAuth Access Token as a JSON blob + --yandex-token-url string Token server url + --zoho-auth-url string Auth server URL + --zoho-client-id string OAuth Client Id + --zoho-client-secret string OAuth Client Secret + --zoho-encoding MultiEncoder The encoding for the backend (default Del,Ctl,InvalidUtf8) + --zoho-region string Zoho region to connect to + --zoho-token string OAuth Access Token as a JSON blob + --zoho-token-url string Token server url \f[R] .fi .SH Docker Volume Plugin @@ -23136,9 +23433,10 @@ Token server url - leave blank to use Amazon\[aq]s. token_url> Optional token URL Remote config Make sure your Redirect URL is set to \[dq]http://127.0.0.1:53682/\[dq] in your custom config. -Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine +Use web browser to automatically authenticate rclone with remote? 
+ * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access +If not sure try Y. If Y failed, try N. y) Yes n) No y/n> y @@ -23479,6 +23777,8 @@ IDrive e2 .IP \[bu] 2 IONOS Cloud .IP \[bu] 2 +Liara Object Storage +.IP \[bu] 2 Minio .IP \[bu] 2 Qiniu Cloud Object Storage (Kodo) @@ -23566,7 +23866,7 @@ name> remote Type of storage to configure. Choose a number from below, or type in your own value [snip] -XX / Amazon S3 Compliant Storage Providers including AWS, Ceph, ChinaMobile, ArvanCloud, Dreamhost, IBM COS, Minio, and Tencent COS +XX / Amazon S3 Compliant Storage Providers including AWS, Ceph, ChinaMobile, ArvanCloud, Dreamhost, IBM COS, Liara, Minio, and Tencent COS \[rs] \[dq]s3\[dq] [snip] Storage> s3 @@ -23576,7 +23876,7 @@ Choose a number from below, or type in your own value \[rs] \[dq]AWS\[dq] 2 / Ceph Object Storage \[rs] \[dq]Ceph\[dq] - 3 / Digital Ocean Spaces + 3 / DigitalOcean Spaces \[rs] \[dq]DigitalOcean\[dq] 4 / Dreamhost DreamObjects \[rs] \[dq]Dreamhost\[dq] @@ -24267,9 +24567,9 @@ all the files to be uploaded as multipart. .PP Here are the Standard options specific to s3 (Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, -Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, -IDrive e2, IONOS Cloud, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, -SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi). +Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, +IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, RackCorp, +Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi). .SS --s3-provider .PP Choose your S3 provider. 
@@ -24326,7 +24626,7 @@ Arvan Cloud Object Storage (AOS) \[dq]DigitalOcean\[dq] .RS 2 .IP \[bu] 2 -Digital Ocean Spaces +DigitalOcean Spaces .RE .IP \[bu] 2 \[dq]Dreamhost\[dq] @@ -24365,6 +24665,12 @@ IONOS Cloud Seagate Lyve Cloud .RE .IP \[bu] 2 +\[dq]Liara\[dq] +.RS 2 +.IP \[bu] 2 +Liara Object Storage +.RE +.IP \[bu] 2 \[dq]Minio\[dq] .RS 2 .IP \[bu] 2 @@ -25150,7 +25456,7 @@ Config: region Env Var: RCLONE_S3_REGION .IP \[bu] 2 Provider: -!AWS,Alibaba,ChinaMobile,Cloudflare,IONOS,ArvanCloud,Qiniu,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive +!AWS,Alibaba,ChinaMobile,Cloudflare,IONOS,ArvanCloud,Liara,Qiniu,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive .IP \[bu] 2 Type: string .IP \[bu] 2 @@ -25861,6 +26167,33 @@ Logrono, Spain .RE .SS --s3-endpoint .PP +Endpoint for Liara Object Storage API. +.PP +Properties: +.IP \[bu] 2 +Config: endpoint +.IP \[bu] 2 +Env Var: RCLONE_S3_ENDPOINT +.IP \[bu] 2 +Provider: Liara +.IP \[bu] 2 +Type: string +.IP \[bu] 2 +Required: false +.IP \[bu] 2 +Examples: +.RS 2 +.IP \[bu] 2 +\[dq]storage.iran.liara.space\[dq] +.RS 2 +.IP \[bu] 2 +The default endpoint +.IP \[bu] 2 +Iran +.RE +.RE +.SS --s3-endpoint +.PP Endpoint for OSS API. 
.PP Properties: @@ -26588,7 +26921,7 @@ Config: endpoint Env Var: RCLONE_S3_ENDPOINT .IP \[bu] 2 Provider: -!AWS,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,ArvanCloud,Scaleway,StackPath,Storj,RackCorp,Qiniu +!AWS,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,Liara,ArvanCloud,Scaleway,StackPath,Storj,RackCorp,Qiniu .IP \[bu] 2 Type: string .IP \[bu] 2 @@ -26603,22 +26936,40 @@ Examples: Dream Objects endpoint .RE .IP \[bu] 2 +\[dq]syd1.digitaloceanspaces.com\[dq] +.RS 2 +.IP \[bu] 2 +DigitalOcean Spaces Sydney 1 +.RE +.IP \[bu] 2 +\[dq]sfo3.digitaloceanspaces.com\[dq] +.RS 2 +.IP \[bu] 2 +DigitalOcean Spaces San Francisco 3 +.RE +.IP \[bu] 2 +\[dq]fra1.digitaloceanspaces.com\[dq] +.RS 2 +.IP \[bu] 2 +DigitalOcean Spaces Frankfurt 1 +.RE +.IP \[bu] 2 \[dq]nyc3.digitaloceanspaces.com\[dq] .RS 2 .IP \[bu] 2 -Digital Ocean Spaces New York 3 +DigitalOcean Spaces New York 3 .RE .IP \[bu] 2 \[dq]ams3.digitaloceanspaces.com\[dq] .RS 2 .IP \[bu] 2 -Digital Ocean Spaces Amsterdam 3 +DigitalOcean Spaces Amsterdam 3 .RE .IP \[bu] 2 \[dq]sgp1.digitaloceanspaces.com\[dq] .RS 2 .IP \[bu] 2 -Digital Ocean Spaces Singapore 1 +DigitalOcean Spaces Singapore 1 .RE .IP \[bu] 2 \[dq]localhost:8333\[dq] @@ -26648,19 +26999,57 @@ Seagate Lyve Cloud AP Southeast 1 (Singapore) \[dq]s3.wasabisys.com\[dq] .RS 2 .IP \[bu] 2 -Wasabi US East endpoint +Wasabi US East 1 (N. +Virginia) +.RE +.IP \[bu] 2 +\[dq]s3.us-east-2.wasabisys.com\[dq] +.RS 2 +.IP \[bu] 2 +Wasabi US East 2 (N. 
+Virginia) +.RE +.IP \[bu] 2 +\[dq]s3.us-central-1.wasabisys.com\[dq] +.RS 2 +.IP \[bu] 2 +Wasabi US Central 1 (Texas) .RE .IP \[bu] 2 \[dq]s3.us-west-1.wasabisys.com\[dq] .RS 2 .IP \[bu] 2 -Wasabi US West endpoint +Wasabi US West 1 (Oregon) +.RE +.IP \[bu] 2 +\[dq]s3.ca-central-1.wasabisys.com\[dq] +.RS 2 +.IP \[bu] 2 +Wasabi CA Central 1 (Toronto) .RE .IP \[bu] 2 \[dq]s3.eu-central-1.wasabisys.com\[dq] .RS 2 .IP \[bu] 2 -Wasabi EU Central endpoint +Wasabi EU Central 1 (Amsterdam) +.RE +.IP \[bu] 2 +\[dq]s3.eu-central-2.wasabisys.com\[dq] +.RS 2 +.IP \[bu] 2 +Wasabi EU Central 2 (Frankfurt) +.RE +.IP \[bu] 2 +\[dq]s3.eu-west-1.wasabisys.com\[dq] +.RS 2 +.IP \[bu] 2 +Wasabi EU West 1 (London) +.RE +.IP \[bu] 2 +\[dq]s3.eu-west-2.wasabisys.com\[dq] +.RS 2 +.IP \[bu] 2 +Wasabi EU West 2 (Paris) .RE .IP \[bu] 2 \[dq]s3.ap-northeast-1.wasabisys.com\[dq] @@ -26675,6 +27064,24 @@ Wasabi AP Northeast 1 (Tokyo) endpoint Wasabi AP Northeast 2 (Osaka) endpoint .RE .IP \[bu] 2 +\[dq]s3.ap-southeast-1.wasabisys.com\[dq] +.RS 2 +.IP \[bu] 2 +Wasabi AP Southeast 1 (Singapore) +.RE +.IP \[bu] 2 +\[dq]s3.ap-southeast-2.wasabisys.com\[dq] +.RS 2 +.IP \[bu] 2 +Wasabi AP Southeast 2 (Sydney) +.RE +.IP \[bu] 2 +\[dq]storage.iran.liara.space\[dq] +.RS 2 +.IP \[bu] 2 +Liara Iran endpoint +.RE +.IP \[bu] 2 \[dq]s3.ir-thr-at1.arvanstorage.com\[dq] .RS 2 .IP \[bu] 2 @@ -27510,7 +27917,7 @@ Config: location_constraint Env Var: RCLONE_S3_LOCATION_CONSTRAINT .IP \[bu] 2 Provider: -!AWS,Alibaba,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,ArvanCloud,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS +!AWS,Alibaba,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Liara,ArvanCloud,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS .IP \[bu] 2 Type: string .IP \[bu] 2 @@ -27528,6 +27935,9 @@ https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl Note that this ACL is applied when server-side copying objects as S3 doesn\[aq]t copy the ACL from the source but rather 
writes a fresh one. .PP +If the acl is an empty string then no X-Amz-Acl: header is added and the +default (private) will be used. +.PP Properties: .IP \[bu] 2 Config: acl @@ -27880,6 +28290,31 @@ Infrequent access storage mode .RE .SS --s3-storage-class .PP +The storage class to use when storing new objects in Liara +.PP +Properties: +.IP \[bu] 2 +Config: storage_class +.IP \[bu] 2 +Env Var: RCLONE_S3_STORAGE_CLASS +.IP \[bu] 2 +Provider: Liara +.IP \[bu] 2 +Type: string +.IP \[bu] 2 +Required: false +.IP \[bu] 2 +Examples: +.RS 2 +.IP \[bu] 2 +\[dq]STANDARD\[dq] +.RS 2 +.IP \[bu] 2 +Standard storage class +.RE +.RE +.SS --s3-storage-class +.PP The storage class to use when storing new objects in ArvanCloud. .PP Properties: @@ -28034,9 +28469,9 @@ Deep archive storage mode .PP Here are the Advanced options specific to s3 (Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, -Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, -IDrive e2, IONOS Cloud, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, -SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi). +Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, +IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, RackCorp, +Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi). .SS --s3-bucket-acl .PP Canned ACL used when creating buckets. @@ -28047,6 +28482,9 @@ https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl Note that this ACL is applied when only when creating buckets. If it isn\[aq]t set then \[dq]acl\[dq] is used instead. .PP +If the \[dq]acl\[dq] and \[dq]bucket_acl\[dq] are empty strings then no +X-Amz-Acl: header is added and the default (private) will be used. +.PP Properties: .IP \[bu] 2 Config: bucket_acl @@ -28823,6 +29261,42 @@ Env Var: RCLONE_S3_DECOMPRESS Type: bool .IP \[bu] 2 Default: false +.SS --s3-might-gzip +.PP +Set this if the backend might gzip objects. 
+.PP +Normally providers will not alter objects when they are downloaded. +If an object was not uploaded with \f[C]Content-Encoding: gzip\f[R] then +it won\[aq]t be set on download. +.PP +However some providers may gzip objects even if they weren\[aq]t +uploaded with \f[C]Content-Encoding: gzip\f[R] (eg Cloudflare). +.PP +A symptom of this would be receiving errors like +.IP +.nf +\f[C] +ERROR corrupted on transfer: sizes differ NNN vs MMM +\f[R] +.fi +.PP +If you set this flag and rclone downloads an object with +Content-Encoding: gzip set and chunked transfer encoding, then rclone +will decompress the object on the fly. +.PP +If this is set to unset (the default) then rclone will choose according +to the provider setting what to apply, but you can override rclone\[aq]s +choice here. +.PP +Properties: +.IP \[bu] 2 +Config: might_gzip +.IP \[bu] 2 +Env Var: RCLONE_S3_MIGHT_GZIP +.IP \[bu] 2 +Type: Tristate +.IP \[bu] 2 +Default: unset .SS --s3-no-system-metadata .PP Suppress setting and reading of system metadata @@ -29318,7 +29792,7 @@ Option Storage. Type of storage to configure. Choose a number from below, or type in your own value. \&... -XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi +XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi \[rs] (s3) \&... Storage> s3 @@ -29518,7 +29992,7 @@ Option Storage. Type of storage to configure. Choose a number from below, or type in your own value. 
[snip] - 5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi + 5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi \[rs] (s3) [snip] Storage> 5 @@ -29659,7 +30133,7 @@ Choose a number from below, or type in your own value \[rs] \[dq]alias\[dq] 2 / Amazon Drive \[rs] \[dq]amazon cloud drive\[dq] - 3 / Amazon S3 Complaint Storage Providers (Dreamhost, Ceph, ChinaMobile, ArvanCloud, Minio, IBM COS) + 3 / Amazon S3 Complaint Storage Providers (Dreamhost, Ceph, ChinaMobile, Liara, ArvanCloud, Minio, IBM COS) \[rs] \[dq]s3\[dq] 4 / Backblaze B2 \[rs] \[dq]b2\[dq] @@ -29861,7 +30335,7 @@ Option Storage. Type of storage to configure. Choose a number from below, or type in your own value. [snip] -XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi +XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi \[rs] (s3) [snip] Storage> s3 @@ -29978,7 +30452,7 @@ Option Storage. Type of storage to configure. Choose a number from below, or type in your own value. 
[snip] -XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi +XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi \[rs] (s3) [snip] Storage> s3 @@ -30295,7 +30769,7 @@ Choose a number from below, or type in your own value \[rs] (alias) 4 / Amazon Drive \[rs] (amazon cloud drive) - 5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi + 5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi \[rs] (s3) [snip] Storage> s3 @@ -30571,7 +31045,7 @@ Choose \f[C]s3\f[R] backend Type of storage to configure. Choose a number from below, or type in your own value. [snip] -XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, Lyve Cloud, Minio, RackCorp, SeaweedFS, and Tencent COS +XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Liara, Lyve Cloud, Minio, RackCorp, SeaweedFS, and Tencent COS \[rs] (s3) [snip] Storage> s3 @@ -30788,7 +31262,7 @@ name> wasabi Type of storage to configure. 
Choose a number from below, or type in your own value [snip] -XX / Amazon S3 (also Dreamhost, Ceph, ChinaMobile, ArvanCloud, Minio) +XX / Amazon S3 (also Dreamhost, Ceph, ChinaMobile, ArvanCloud, Minio, Liara) \[rs] \[dq]s3\[dq] [snip] Storage> s3 @@ -30910,7 +31384,7 @@ Type of storage to configure. Enter a string value. Press Enter for the default (\[dq]\[dq]). Choose a number from below, or type in your own value [snip] - 4 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, Minio, and Tencent COS + 4 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Liara, Minio, and Tencent COS \[rs] \[dq]s3\[dq] [snip] Storage> s3 @@ -31027,7 +31501,7 @@ Option Storage. Type of storage to configure. Choose a number from below, or type in your own value. ... - 5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, Lyve Cloud, Minio, RackCorp, SeaweedFS, and Tencent COS + 5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Lyve Cloud, Minio, RackCorp, SeaweedFS, and Tencent COS \[rs] (s3) ... Storage> s3 @@ -31257,6 +31731,115 @@ d) Delete this remote y/e/d> y \f[R] .fi +.SS Liara +.PP +Here is an example of making a Liara Object +Storage (https://liara.ir/landing/object-storage) configuration. +First run: +.IP +.nf +\f[C] +rclone config +\f[R] +.fi +.PP +This will guide you through an interactive setup process. +.IP +.nf +\f[C] +No remotes found, make a new one? +n) New remote +s) Set configuration password +n/s> n +name> Liara +Type of storage to configure. 
+Choose a number from below, or type in your own value +[snip] +XX / Amazon S3 (also Dreamhost, Ceph, ChinaMobile, ArvanCloud, Liara, Minio) + \[rs] \[dq]s3\[dq] +[snip] +Storage> s3 +Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). Only applies if access_key_id and secret_access_key is blank. +Choose a number from below, or type in your own value + 1 / Enter AWS credentials in the next step + \[rs] \[dq]false\[dq] + 2 / Get AWS credentials from the environment (env vars or IAM) + \[rs] \[dq]true\[dq] +env_auth> 1 +AWS Access Key ID - leave blank for anonymous access or runtime credentials. +access_key_id> YOURACCESSKEY +AWS Secret Access Key (password) - leave blank for anonymous access or runtime credentials. +secret_access_key> YOURSECRETACCESSKEY +Region to connect to. +Choose a number from below, or type in your own value + / The default endpoint + 1 | US Region, Northern Virginia, or Pacific Northwest. + | Leave location constraint empty. + \[rs] \[dq]us-east-1\[dq] +[snip] +region> +Endpoint for S3 API. +Leave blank if using Liara to use the default endpoint for the region. +Specify if using an S3 clone such as Ceph. +endpoint> storage.iran.liara.space +Canned ACL used when creating buckets and/or storing objects in S3. +For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl +Choose a number from below, or type in your own value + 1 / Owner gets FULL_CONTROL. No one else has access rights (default). + \[rs] \[dq]private\[dq] +[snip] +acl> +The server-side encryption algorithm used when storing this object in S3. +Choose a number from below, or type in your own value + 1 / None + \[rs] \[dq]\[dq] + 2 / AES256 + \[rs] \[dq]AES256\[dq] +server_side_encryption> +The storage class to use when storing objects in S3. 
+Choose a number from below, or type in your own value + 1 / Default + \[rs] \[dq]\[dq] + 2 / Standard storage class + \[rs] \[dq]STANDARD\[dq] +storage_class> +Remote config +-------------------- +[Liara] +env_auth = false +access_key_id = YOURACCESSKEY +secret_access_key = YOURSECRETACCESSKEY +endpoint = storage.iran.liara.space +location_constraint = +acl = +server_side_encryption = +storage_class = +-------------------- +y) Yes this is OK +e) Edit this remote +d) Delete this remote +y/e/d> y +\f[R] +.fi +.PP +This will leave the config file looking like this. +.IP +.nf +\f[C] +[Liara] +type = s3 +provider = Liara +env_auth = false +access_key_id = YOURACCESSKEY +secret_access_key = YOURSECRETACCESSKEY +region = +endpoint = storage.iran.liara.space +location_constraint = +acl = +server_side_encryption = +storage_class = +\f[R] +.fi .SS ArvanCloud .PP ArvanCloud (https://www.arvancloud.com/en/products/cloud-storage) @@ -31280,7 +31863,7 @@ name> ArvanCloud Type of storage to configure. Choose a number from below, or type in your own value [snip] -XX / Amazon S3 (also Dreamhost, Ceph, ChinaMobile, ArvanCloud, Minio) +XX / Amazon S3 (also Dreamhost, Ceph, ChinaMobile, ArvanCloud, Liara, Minio) \[rs] \[dq]s3\[dq] [snip] Storage> s3 @@ -31414,7 +31997,7 @@ Choose a number from below, or type in your own value \[rs] \[dq]alias\[dq] 3 / Amazon Drive \[rs] \[dq]amazon cloud drive\[dq] - 4 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, Minio, and Tencent COS + 4 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Liara, Minio, and Tencent COS \[rs] \[dq]s3\[dq] [snip] Storage> s3 @@ -32406,9 +32989,10 @@ Choose a number from below, or type in your own value \[rs] \[dq]enterprise\[dq] box_sub_type> Remote config -Use auto config? 
- * Say Y if not sure - * Say N if you are working on a remote or headless machine +Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access +If not sure try Y. If Y failed, try N. y) Yes n) No y/n> y @@ -32546,9 +33130,10 @@ Already have a token - refresh? y) Yes n) No y/n> y -Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine +Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access +If not sure try Y. If Y failed, try N. y) Yes n) No y/n> y @@ -32895,6 +33480,10 @@ rclone maps this to and from an identical looking unicode equivalent .PP Box only supports filenames up to 255 characters in length. .PP +Box has API rate +limits (https://developer.box.com/guides/api-calls/permissions-and-errors/rate-limits/) +that sometimes reduce the speed of rclone. +.PP \f[C]rclone about\f[R] is not supported by the Box backend. Backends without this capability cannot determine free space for an rclone mount or use policy \f[C]mfs\f[R] (most free space) as a member @@ -32903,7 +33492,7 @@ of an rclone union remote. See List of backends that do not support rclone about (https://rclone.org/overview/#optional-features) and rclone about (https://rclone.org/commands/rclone_about/) -.SH Cache (DEPRECATED) +.SH Cache .PP The \f[C]cache\f[R] remote wraps another existing remote and stores file structure and its data for long running tasks like @@ -33732,7 +34321,7 @@ Print stats on the cache backend in JSON format. 
rclone backend stats remote: [options] [+] \f[R] .fi -.SH Chunker (BETA) +.SH Chunker .PP The \f[C]chunker\f[R] overlay transparently splits large files into smaller chunks during upload to wrapped remote and transparently @@ -34395,9 +34984,10 @@ y) Yes n) No y/n> n Remote config -Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine +Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access +If not sure try Y. If Y failed, try N. y) Yes n) No y/n> y @@ -35643,7 +36233,7 @@ For full protection against this you should always use a salt. .IP \[bu] 2 rclone cryptdecode (https://rclone.org/commands/rclone_cryptdecode/) - Show forward/reverse mapping of encrypted filenames -.SH Compress (Experimental) +.SH Compress .SS Warning .PP This remote is currently \f[B]experimental\f[R]. @@ -36064,6 +36654,17 @@ y/e/d> y \f[R] .fi .PP +See the remote setup docs (https://rclone.org/remote_setup/) for how to +set it up on a machine with no Internet browser available. +.PP +Note that rclone runs a webserver on your local machine to collect the +token as returned from Dropbox. +This only runs from the moment it opens your browser to the moment you +get back the verification code. +This is on \f[C]http://127.0.0.1:53682/\f[R] and it may require you to +unblock it temporarily if you are running a host firewall, or use manual +mode. +.PP You can then use it like this, .PP List directories in top level of your dropbox @@ -37218,7 +37819,7 @@ Use Implicit FTPS (FTP over TLS). When using implicit FTP over TLS the client connects using TLS right from the start which breaks compatibility with non-TLS-aware servers. This is usually served over port 990 rather than port 21. -Cannot be used in combination with explicit FTP. +Cannot be used in combination with explicit FTPS. 
.PP Properties: .IP \[bu] 2 @@ -37236,7 +37837,7 @@ Use Explicit FTPS (FTP over TLS). When using explicit FTP over TLS the client explicitly requests security from the server in order to upgrade a plain text connection to an encrypted one. -Cannot be used in combination with implicit FTP. +Cannot be used in combination with implicit FTPS. .PP Properties: .IP \[bu] 2 @@ -37667,9 +38268,10 @@ Choose a number from below, or type in your own value \[rs] \[dq]DURABLE_REDUCED_AVAILABILITY\[dq] storage_class> 5 Remote config -Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine or Y didn\[aq]t work +Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access +If not sure try Y. If Y failed, try N. y) Yes n) No y/n> y @@ -37694,8 +38296,12 @@ y/e/d> y \f[R] .fi .PP +See the remote setup docs (https://rclone.org/remote_setup/) for how to +set it up on a machine with no Internet browser available. +.PP Note that rclone runs a webserver on your local machine to collect the -token as returned from Google if you use auto config mode. +token as returned from Google if using web browser to automatically +authenticate. This only runs from the moment it opens your browser to the moment you get back the verification code. This is on \f[C]http://127.0.0.1:53682/\f[R] and this it may require you @@ -38617,9 +39223,10 @@ scope> 1 Service Account Credentials JSON file path - needed only if you want use SA instead of interactive login. service_account_file> Remote config -Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine or Y didn\[aq]t work +Use web browser to automatically authenticate rclone with remote? 
+ * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access +If not sure try Y. If Y failed, try N. y) Yes n) No y/n> y @@ -38647,8 +39254,12 @@ y/e/d> y \f[R] .fi .PP +See the remote setup docs (https://rclone.org/remote_setup/) for how to +set it up on a machine with no Internet browser available. +.PP Note that rclone runs a webserver on your local machine to collect the -token as returned from Google if you use auto config mode. +token as returned from Google if using web browser to automatically +authenticate. This only runs from the moment it opens your browser to the moment you get back the verification code. This is on \f[C]http://127.0.0.1:53682/\f[R] and it may require you to @@ -40729,9 +41340,10 @@ y) Yes n) No y/n> n Remote config -Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine +Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access +If not sure try Y. If Y failed, try N. y) Yes n) No y/n> y @@ -40756,8 +41368,12 @@ y/e/d> y \f[R] .fi .PP +See the remote setup docs (https://rclone.org/remote_setup/) for how to +set it up on a machine with no Internet browser available. +.PP Note that rclone runs a webserver on your local machine to collect the -token as returned from Google if you use auto config mode. +token as returned from Google if using web browser to automatically +authenticate. This only runs from the moment it opens your browser to the moment you get back the verification code. This is on \f[C]http://127.0.0.1:53682/\f[R] and this may require you to @@ -41205,7 +41821,7 @@ Rclone cannot delete files anywhere except under \f[C]album\f[R]. 
.PP The Google Photos API does not support deleting albums - see bug #135714733 (https://issuetracker.google.com/issues/135714733). -.SH Hasher (EXPERIMENTAL) +.SH Hasher .PP Hasher is a special overlay backend to create remotes which handle checksums for other remotes. @@ -41950,7 +42566,10 @@ Leave blank normally. scope_access> Edit advanced config? y/n> n -Use auto config? +Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access +If not sure try Y. If Y failed, try N. y/n> y If your browser doesn\[aq]t open automatically go to the following link: http://127.0.0.1:53682/auth?state=xxxxxxxxxxxxxxxxxxxxxx Log in and authorize rclone for access @@ -42812,6 +43431,26 @@ It can be triggered when you did a server-side copy. .PP Reading metadata will also provide custom (non-standard nor reserved) ones. +.SS Filtering auto generated files +.PP +The Internet Archive automatically creates metadata files after upload. +These can cause problems when doing an \f[C]rclone sync\f[R] as rclone +will try, and fail, to delete them. +These metadata files are not changeable, as they are created by the +Internet Archive automatically. +.PP +These auto-created files can be excluded from the sync using metadata +filtering (https://rclone.org/filtering/#metadata). +.IP +.nf +\f[C] +rclone sync ... --metadata-exclude \[dq]source=metadata\[dq] --metadata-exclude \[dq]format=Metadata\[dq] +\f[R] +.fi +.PP +Which excludes from the sync any files which have the +\f[C]source=metadata\f[R] or \f[C]format=Metadata\f[R] flags which are +added to Internet Archive auto-created files. .SS Configuration .PP Here is an example of making an internetarchive configuration. @@ -44213,7 +44852,32 @@ supported by rclone) .SS Configuration .PP Here is an example of making a mailru configuration. 
-First create a Mail.ru Cloud account and choose a tariff, then run +.PP +First create a Mail.ru Cloud account and choose a tariff. +.PP +You will need to log in and create an app password for rclone. +Rclone \f[B]will not work\f[R] with your normal username and password - +it will give an error like +\f[C]oauth2: server response missing access_token\f[R]. +.IP \[bu] 2 +Click on your user icon in the top right +.IP \[bu] 2 +Go to Security / \[dq]\[u041F]\[u0430]\[u0440]\[u043E]\[u043B]\[u044C] +\[u0438] +\[u0431]\[u0435]\[u0437]\[u043E]\[u043F]\[u0430]\[u0441]\[u043D]\[u043E]\[u0441]\[u0442]\[u044C]\[dq] +.IP \[bu] 2 +Click password for apps / +\[dq]\[u041F]\[u0430]\[u0440]\[u043E]\[u043B]\[u0438] +\[u0434]\[u043B]\[u044F] +\[u0432]\[u043D]\[u0435]\[u0448]\[u043D]\[u0438]\[u0445] +\[u043F]\[u0440]\[u0438]\[u043B]\[u043E]\[u0436]\[u0435]\[u043D]\[u0438]\[u0439]\[dq] +.IP \[bu] 2 +Add the password - give it a name - eg \[dq]rclone\[dq] +.IP \[bu] 2 +Copy the password and use this password below - your normal login +password won\[aq]t work. +.PP +Now run .IP .nf \f[C] @@ -44244,6 +44908,10 @@ User name (usually email) Enter a string value. Press Enter for the default (\[dq]\[dq]). user> username\[at]mail.ru Password + +This must be an app password - rclone will not work with your normal +password. See the Configuration section in the docs for how to make an +app password. y) Yes type in my own password g) Generate random password y/g> y @@ -44441,6 +45109,11 @@ Required: true .PP Password. .PP +This must be an app password - rclone will not work with your normal +password. +See the Configuration section in the docs for how to make an app +password. +.PP \f[B]NB\f[R] Input to this must be obscured - see rclone obscure (https://rclone.org/commands/rclone_obscure/). .PP @@ -45608,7 +46281,14 @@ The modified time is stored as metadata on the object with the \f[C]mtime\f[R] key. It is stored using RFC3339 Format time with nanosecond precision. 
The metadata is supplied during directory listings so there is no -overhead to using it. +performance overhead to using it. +.PP +If you wish to use the Azure standard \f[C]LastModified\f[R] time stored +on the object as the modified time, then use the +\f[C]--use-server-modtime\f[R] flag. +Note that rclone can\[aq]t set \f[C]LastModified\f[R], so using the +\f[C]--update\f[R] flag when syncing is recommended if using +\f[C]--use-server-modtime\f[R]. .SS Performance .PP When uploading large files, increasing the value of @@ -45682,10 +46362,126 @@ MD5 hashes are stored with blobs. However blobs that were uploaded in chunks only have an MD5 if the source remote was capable of MD5 hashes, e.g. the local disk. -.SS Authenticating with Azure Blob Storage +.SS Authentication .PP -Rclone has 3 ways of authenticating with Azure Blob Storage: -.SS Account and Key +There are a number of ways of supplying credentials for Azure Blob +Storage. +Rclone tries them in the order of the sections below. +.SS Env Auth +.PP +If the \f[C]env_auth\f[R] config parameter is \f[C]true\f[R] then rclone +will pull credentials from the environment or runtime. +.PP +It tries these authentication methods in this order: +.IP "1." 3 +Environment Variables +.IP "2." 3 +Managed Service Identity Credentials +.IP "3." 3 +Azure CLI credentials (as used by the az tool) +.PP +These are described in the following sections +.SS Env Auth: 1. Environment Variables +.PP +If \f[C]env_auth\f[R] is set and environment variables are present +rclone authenticates a service principal with a secret or certificate, +or a user with a password, depending on which environment variable are +set. +It reads configuration from these variables, in the following order: +.IP "1." 3 +Service principal with client secret +.RS 4 +.IP \[bu] 2 +\f[C]AZURE_TENANT_ID\f[R]: ID of the service principal\[aq]s tenant. +Also called its \[dq]directory\[dq] ID. 
+.IP \[bu] 2 +\f[C]AZURE_CLIENT_ID\f[R]: the service principal\[aq]s client ID +.IP \[bu] 2 +\f[C]AZURE_CLIENT_SECRET\f[R]: one of the service principal\[aq]s client +secrets +.RE +.IP "2." 3 +Service principal with certificate +.RS 4 +.IP \[bu] 2 +\f[C]AZURE_TENANT_ID\f[R]: ID of the service principal\[aq]s tenant. +Also called its \[dq]directory\[dq] ID. +.IP \[bu] 2 +\f[C]AZURE_CLIENT_ID\f[R]: the service principal\[aq]s client ID +.IP \[bu] 2 +\f[C]AZURE_CLIENT_CERTIFICATE_PATH\f[R]: path to a PEM or PKCS12 +certificate file including the private key. +.IP \[bu] 2 +\f[C]AZURE_CLIENT_CERTIFICATE_PASSWORD\f[R]: (optional) password for the +certificate file. +.IP \[bu] 2 +\f[C]AZURE_CLIENT_SEND_CERTIFICATE_CHAIN\f[R]: (optional) Specifies +whether an authentication request will include an x5c header to support +subject name / issuer based authentication. +When set to \[dq]true\[dq] or \[dq]1\[dq], authentication requests +include the x5c header. +.RE +.IP "3." 3 +User with username and password +.RS 4 +.IP \[bu] 2 +\f[C]AZURE_TENANT_ID\f[R]: (optional) tenant to authenticate in. +Defaults to \[dq]organizations\[dq]. +.IP \[bu] 2 +\f[C]AZURE_CLIENT_ID\f[R]: client ID of the application the user will +authenticate to +.IP \[bu] 2 +\f[C]AZURE_USERNAME\f[R]: a username (usually an email address) +.IP \[bu] 2 +\f[C]AZURE_PASSWORD\f[R]: the user\[aq]s password +.RE +.SS Env Auth: 2. Managed Service Identity Credentials +.PP +When using Managed Service Identity if the VM(SS) on which this program +is running has a system-assigned identity, it will be used by default. +If the resource has no system-assigned but exactly one user-assigned +identity, the user-assigned identity will be used by default. +.PP +If the resource has multiple user-assigned identities you will need to +unset \f[C]env_auth\f[R] and set \f[C]use_msi\f[R] instead. +See the \f[C]use_msi\f[R] section. +.SS Env Auth: 3. 
Azure CLI credentials (as used by the az tool)
+.PP
+Credentials created with the \f[C]az\f[R] tool can be picked up using
+\f[C]env_auth\f[R].
+.PP
+For example if you were to login with a service principal like this:
+.IP
+.nf
+\f[C]
+az login --service-principal -u XXX -p XXX --tenant XXX
+\f[R]
+.fi
+.PP
+Then you could access rclone resources like this:
+.IP
+.nf
+\f[C]
+rclone lsf :azureblob,env_auth,account=ACCOUNT:CONTAINER
+\f[R]
+.fi
+.PP
+Or
+.IP
+.nf
+\f[C]
+rclone lsf --azureblob-env-auth --azureblob-account=ACCOUNT :azureblob:CONTAINER
+\f[R]
+.fi
+.PP
+Which is analogous to using the \f[C]az\f[R] tool:
+.IP
+.nf
+\f[C]
+az storage blob list --container-name CONTAINER --account-name ACCOUNT --auth-mode login
+\f[R]
+.fi
+.SS Account and Shared Key
.PP
This is the most straight forward and least flexible way.
Just fill in the \f[C]account\f[R] and \f[C]key\f[R] lines and leave the
@@ -45694,7 +46490,7 @@ rest blank.
.PP
This can be an account level SAS URL or container level SAS URL.
.PP
-To use it leave \f[C]account\f[R], \f[C]key\f[R] blank and fill in
+To use it leave \f[C]account\f[R] and \f[C]key\f[R] blank and fill in
\f[C]sas_url\f[R].
.PP
An account level SAS URL or container level SAS URL can be obtained from
@@ -45733,15 +46529,99 @@ rclone ls azureblob:othercontainer
Container level SAS URLs are useful for temporarily allowing third
parties access to a single container or putting credentials into an
untrusted environment such as a CI build server.
+.SS Service principal with client secret
+.PP
+If these variables are set, rclone will authenticate with a service
+principal with a client secret.
+.IP \[bu] 2
+\f[C]tenant\f[R]: ID of the service principal\[aq]s tenant.
+Also called its \[dq]directory\[dq] ID.
+.IP \[bu] 2 +\f[C]client_id\f[R]: the service principal\[aq]s client ID +.IP \[bu] 2 +\f[C]client_secret\f[R]: one of the service principal\[aq]s client +secrets +.PP +The credentials can also be placed in a file using the +\f[C]service_principal_file\f[R] configuration option. +.SS Service principal with certificate +.PP +If these variables are set, rclone will authenticate with a service +principal with certificate. +.IP \[bu] 2 +\f[C]tenant\f[R]: ID of the service principal\[aq]s tenant. +Also called its \[dq]directory\[dq] ID. +.IP \[bu] 2 +\f[C]client_id\f[R]: the service principal\[aq]s client ID +.IP \[bu] 2 +\f[C]client_certificate_path\f[R]: path to a PEM or PKCS12 certificate +file including the private key. +.IP \[bu] 2 +\f[C]client_certificate_password\f[R]: (optional) password for the +certificate file. +.IP \[bu] 2 +\f[C]client_send_certificate_chain\f[R]: (optional) Specifies whether an +authentication request will include an x5c header to support subject +name / issuer based authentication. +When set to \[dq]true\[dq] or \[dq]1\[dq], authentication requests +include the x5c header. +.PP +\f[B]NB\f[R] \f[C]client_certificate_password\f[R] must be obscured - +see rclone obscure (https://rclone.org/commands/rclone_obscure/). +.SS User with username and password +.PP +If these variables are set, rclone will authenticate with username and +password. +.IP \[bu] 2 +\f[C]tenant\f[R]: (optional) tenant to authenticate in. +Defaults to \[dq]organizations\[dq]. +.IP \[bu] 2 +\f[C]client_id\f[R]: client ID of the application the user will +authenticate to +.IP \[bu] 2 +\f[C]username\f[R]: a username (usually an email address) +.IP \[bu] 2 +\f[C]password\f[R]: the user\[aq]s password +.PP +Microsoft doesn\[aq]t recommend this kind of authentication, because +it\[aq]s less secure than other authentication flows. 
+This method is not interactive, so it isn\[aq]t compatible with any form
+of multi-factor authentication, and the application must already have
+user or admin consent.
+This credential can only authenticate work and school accounts; it
+can\[aq]t authenticate Microsoft accounts.
+.PP
+\f[B]NB\f[R] \f[C]password\f[R] must be obscured - see rclone
+obscure (https://rclone.org/commands/rclone_obscure/).
+.SS Managed Service Identity Credentials
+.PP
+If \f[C]use_msi\f[R] is set then managed service identity credentials
+are used.
+This authentication only works when running in an Azure service.
+\f[C]env_auth\f[R] needs to be unset to use this.
+.PP
+However if you have multiple user identities to choose from these must
+be explicitly specified using exactly one of the
+\f[C]msi_object_id\f[R], \f[C]msi_client_id\f[R], or
+\f[C]msi_mi_res_id\f[R] parameters.
+.PP
+If none of \f[C]msi_object_id\f[R], \f[C]msi_client_id\f[R], or
+\f[C]msi_mi_res_id\f[R] is set, this is equivalent to using
+\f[C]env_auth\f[R].
.SS Standard options
.PP
Here are the Standard options specific to azureblob (Microsoft Azure
Blob Storage).
.SS --azureblob-account
.PP
-Storage Account Name.
+Azure Storage Account Name.
.PP
-Leave blank to use SAS URL or Emulator.
+Set this to the Azure Storage Account Name in use.
+.PP
+Leave blank to use SAS URL or Emulator, otherwise it needs to be set.
+.PP
+If this is blank and if env_auth is set it will be read from the
+environment variable \f[C]AZURE_STORAGE_ACCOUNT_NAME\f[R] if possible.
.PP
Properties:
.IP \[bu] 2
@@ -45752,6 +46632,190 @@ Env Var: RCLONE_AZUREBLOB_ACCOUNT
Type: string
.IP \[bu] 2
Required: false
+.SS --azureblob-env-auth
+.PP
+Read credentials from runtime (environment variables, CLI or MSI).
+.PP
+See the authentication docs for full info.
+.PP +Properties: +.IP \[bu] 2 +Config: env_auth +.IP \[bu] 2 +Env Var: RCLONE_AZUREBLOB_ENV_AUTH +.IP \[bu] 2 +Type: bool +.IP \[bu] 2 +Default: false +.SS --azureblob-key +.PP +Storage Account Shared Key. +.PP +Leave blank to use SAS URL or Emulator. +.PP +Properties: +.IP \[bu] 2 +Config: key +.IP \[bu] 2 +Env Var: RCLONE_AZUREBLOB_KEY +.IP \[bu] 2 +Type: string +.IP \[bu] 2 +Required: false +.SS --azureblob-sas-url +.PP +SAS URL for container level access only. +.PP +Leave blank if using account/key or Emulator. +.PP +Properties: +.IP \[bu] 2 +Config: sas_url +.IP \[bu] 2 +Env Var: RCLONE_AZUREBLOB_SAS_URL +.IP \[bu] 2 +Type: string +.IP \[bu] 2 +Required: false +.SS --azureblob-tenant +.PP +ID of the service principal\[aq]s tenant. +Also called its directory ID. +.PP +Set this if using - Service principal with client secret - Service +principal with certificate - User with username and password +.PP +Properties: +.IP \[bu] 2 +Config: tenant +.IP \[bu] 2 +Env Var: RCLONE_AZUREBLOB_TENANT +.IP \[bu] 2 +Type: string +.IP \[bu] 2 +Required: false +.SS --azureblob-client-id +.PP +The ID of the client in use. +.PP +Set this if using - Service principal with client secret - Service +principal with certificate - User with username and password +.PP +Properties: +.IP \[bu] 2 +Config: client_id +.IP \[bu] 2 +Env Var: RCLONE_AZUREBLOB_CLIENT_ID +.IP \[bu] 2 +Type: string +.IP \[bu] 2 +Required: false +.SS --azureblob-client-secret +.PP +One of the service principal\[aq]s client secrets +.PP +Set this if using - Service principal with client secret +.PP +Properties: +.IP \[bu] 2 +Config: client_secret +.IP \[bu] 2 +Env Var: RCLONE_AZUREBLOB_CLIENT_SECRET +.IP \[bu] 2 +Type: string +.IP \[bu] 2 +Required: false +.SS --azureblob-client-certificate-path +.PP +Path to a PEM or PKCS12 certificate file including the private key. 
+.PP +Set this if using - Service principal with certificate +.PP +Properties: +.IP \[bu] 2 +Config: client_certificate_path +.IP \[bu] 2 +Env Var: RCLONE_AZUREBLOB_CLIENT_CERTIFICATE_PATH +.IP \[bu] 2 +Type: string +.IP \[bu] 2 +Required: false +.SS --azureblob-client-certificate-password +.PP +Password for the certificate file (optional). +.PP +Optionally set this if using - Service principal with certificate +.PP +And the certificate has a password. +.PP +\f[B]NB\f[R] Input to this must be obscured - see rclone +obscure (https://rclone.org/commands/rclone_obscure/). +.PP +Properties: +.IP \[bu] 2 +Config: client_certificate_password +.IP \[bu] 2 +Env Var: RCLONE_AZUREBLOB_CLIENT_CERTIFICATE_PASSWORD +.IP \[bu] 2 +Type: string +.IP \[bu] 2 +Required: false +.SS Advanced options +.PP +Here are the Advanced options specific to azureblob (Microsoft Azure +Blob Storage). +.SS --azureblob-client-send-certificate-chain +.PP +Send the certificate chain when using certificate auth. +.PP +Specifies whether an authentication request will include an x5c header +to support subject name / issuer based authentication. +When set to true, authentication requests include the x5c header. +.PP +Optionally set this if using - Service principal with certificate +.PP +Properties: +.IP \[bu] 2 +Config: client_send_certificate_chain +.IP \[bu] 2 +Env Var: RCLONE_AZUREBLOB_CLIENT_SEND_CERTIFICATE_CHAIN +.IP \[bu] 2 +Type: bool +.IP \[bu] 2 +Default: false +.SS --azureblob-username +.PP +User name (usually an email address) +.PP +Set this if using - User with username and password +.PP +Properties: +.IP \[bu] 2 +Config: username +.IP \[bu] 2 +Env Var: RCLONE_AZUREBLOB_USERNAME +.IP \[bu] 2 +Type: string +.IP \[bu] 2 +Required: false +.SS --azureblob-password +.PP +The user\[aq]s password +.PP +Set this if using - User with username and password +.PP +\f[B]NB\f[R] Input to this must be obscured - see rclone +obscure (https://rclone.org/commands/rclone_obscure/). 
+.PP +Properties: +.IP \[bu] 2 +Config: password +.IP \[bu] 2 +Env Var: RCLONE_AZUREBLOB_PASSWORD +.IP \[bu] 2 +Type: string +.IP \[bu] 2 +Required: false .SS --azureblob-service-principal-file .PP Path to file containing credentials for use with a service principal. @@ -45775,6 +46839,11 @@ and \[dq]Assign an Azure role for access to blob data\[dq] (https://docs.microsoft.com/en-us/azure/storage/common/storage-auth-aad-rbac-cli) pages for more details. .PP +It may be more convenient to put the credentials directly into the +rclone config file under the \f[C]client_id\f[R], \f[C]tenant\f[R] and +\f[C]client_secret\f[R] keys instead of setting +\f[C]service_principal_file\f[R]. +.PP Properties: .IP \[bu] 2 Config: service_principal_file @@ -45784,36 +46853,6 @@ Env Var: RCLONE_AZUREBLOB_SERVICE_PRINCIPAL_FILE Type: string .IP \[bu] 2 Required: false -.SS --azureblob-key -.PP -Storage Account Key. -.PP -Leave blank to use SAS URL or Emulator. -.PP -Properties: -.IP \[bu] 2 -Config: key -.IP \[bu] 2 -Env Var: RCLONE_AZUREBLOB_KEY -.IP \[bu] 2 -Type: string -.IP \[bu] 2 -Required: false -.SS --azureblob-sas-url -.PP -SAS URL for container level access only. -.PP -Leave blank if using account/key or Emulator. -.PP -Properties: -.IP \[bu] 2 -Config: sas_url -.IP \[bu] 2 -Env Var: RCLONE_AZUREBLOB_SAS_URL -.IP \[bu] 2 -Type: string -.IP \[bu] 2 -Required: false .SS --azureblob-use-msi .PP Use a managed service identity to authenticate (only works in Azure). @@ -45839,25 +46878,6 @@ Env Var: RCLONE_AZUREBLOB_USE_MSI Type: bool .IP \[bu] 2 Default: false -.SS --azureblob-use-emulator -.PP -Uses local storage emulator if provided as \[aq]true\[aq]. -.PP -Leave blank if using real azure storage endpoint. -.PP -Properties: -.IP \[bu] 2 -Config: use_emulator -.IP \[bu] 2 -Env Var: RCLONE_AZUREBLOB_USE_EMULATOR -.IP \[bu] 2 -Type: bool -.IP \[bu] 2 -Default: false -.SS Advanced options -.PP -Here are the Advanced options specific to azureblob (Microsoft Azure -Blob Storage). 
.SS --azureblob-msi-object-id .PP Object ID of the user-assigned MSI to use, if any. @@ -45903,6 +46923,21 @@ Env Var: RCLONE_AZUREBLOB_MSI_MI_RES_ID Type: string .IP \[bu] 2 Required: false +.SS --azureblob-use-emulator +.PP +Uses local storage emulator if provided as \[aq]true\[aq]. +.PP +Leave blank if using real azure storage endpoint. +.PP +Properties: +.IP \[bu] 2 +Config: use_emulator +.IP \[bu] 2 +Env Var: RCLONE_AZUREBLOB_USE_EMULATOR +.IP \[bu] 2 +Type: bool +.IP \[bu] 2 +Default: false .SS --azureblob-endpoint .PP Endpoint for the service. @@ -46162,6 +47197,22 @@ Blob data within this container can be read via anonymous request. Allow full public read access for container and blob data. .RE .RE +.SS --azureblob-no-check-container +.PP +If set, don\[aq]t attempt to check the container exists or create it. +.PP +This can be useful when trying to minimise the number of transactions +rclone does if you know the container exists already. +.PP +Properties: +.IP \[bu] 2 +Config: no_check_container +.IP \[bu] 2 +Env Var: RCLONE_AZUREBLOB_NO_CHECK_CONTAINER +.IP \[bu] 2 +Type: bool +.IP \[bu] 2 +Default: false .SS --azureblob-no-head-object .PP If set, do not do HEAD before GET when getting objects. @@ -46175,6 +47226,22 @@ Env Var: RCLONE_AZUREBLOB_NO_HEAD_OBJECT Type: bool .IP \[bu] 2 Default: false +.SS Custom upload headers +.PP +You can set custom upload headers with the \f[C]--header-upload\f[R] +flag. +.IP \[bu] 2 +Cache-Control +.IP \[bu] 2 +Content-Disposition +.IP \[bu] 2 +Content-Encoding +.IP \[bu] 2 +Content-Language +.IP \[bu] 2 +Content-Type +.PP +Eg \f[C]--header-upload \[dq]Content-Type: text/potato\[dq]\f[R] .SS Limitations .PP MD5 sums are only uploaded with chunked files if the source has an MD5 @@ -46192,16 +47259,21 @@ about (https://rclone.org/overview/#optional-features) and rclone about (https://rclone.org/commands/rclone_about/) .SS Azure Storage Emulator Support .PP -You can run rclone with storage emulator (usually \f[I]azurite\f[R]). 
+You can run rclone with the storage emulator (usually +\f[I]azurite\f[R]). .PP To do this, just set up a new remote with \f[C]rclone config\f[R] -following instructions described in introduction and set -\f[C]use_emulator\f[R] config as \f[C]true\f[R]. -You do not need to provide default account name neither an account key. +following the instructions in the introduction and set +\f[C]use_emulator\f[R] in the advanced settings as \f[C]true\f[R]. +You do not need to provide a default account name nor an account key. +But you can override them in the \f[C]account\f[R] and \f[C]key\f[R] +options. +(Prior to v1.61 they were hard coded to \f[I]azurite\f[R]\[aq]s +\f[C]devstoreaccount1\f[R].) .PP Also, if you want to access a storage emulator instance running on a -different machine, you can override \f[I]Endpoint\f[R] parameter in -advanced settings, setting it to +different machine, you can override the \f[C]endpoint\f[R] parameter in +the advanced settings, setting it to \f[C]http(s)://:/devstoreaccount1\f[R] (e.g. \f[C]http://10.254.2.5:10000/devstoreaccount1\f[R]). .SH Microsoft OneDrive @@ -46259,9 +47331,10 @@ y) Yes n) No y/n> n Remote config -Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine +Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access +If not sure try Y. If Y failed, try N. y) Yes n) No y/n> y @@ -47259,6 +48332,34 @@ links to be made for the organisation/sharepoint library. To fix the permissions as an admin, take a look at the docs: 1 (https://docs.microsoft.com/en-us/sharepoint/turn-external-sharing-on-or-off), 2 (https://support.microsoft.com/en-us/office/set-up-and-manage-access-requests-94b26e0b-2822-49d4-929a-8455698654b3). 
+.SS Can not access \f[C]Shared\f[R] with me files +.PP +Shared with me files is not supported by rclone +currently (https://github.com/rclone/rclone/issues/4062), but there is a +workaround: +.IP "1." 3 +Visit https://onedrive.live.com (https://onedrive.live.com/) +.IP "2." 3 +Right click a item in \f[C]Shared\f[R], then click +\f[C]Add shortcut to My files\f[R] in the context +.RS 4 +.PP +Screenshot (Shared with me) +.PP +[IMAGE: make_shortcut (https://user-images.githubusercontent.com/60313789/206118040-7e762b3b-aa61-41a1-8649-cc18889f3572.png)] +.RE +.IP "3." 3 +The shortcut will appear in \f[C]My files\f[R], you can access it with +rclone, it behaves like a normal folder/file. +.RS 4 +.PP +Screenshot (My Files) +.PP +[IMAGE: in_my_files (https://i.imgur.com/0S8H3li.png)] +.RE +.PP +Screenshot (rclone mount) +[IMAGE: rclone_mount (https://i.imgur.com/2Iq66sW.png)] .SH OpenDrive .PP Paths are specified as \f[C]remote:path\f[R] @@ -49714,9 +50815,10 @@ client_id> Pcloud App Client Secret - leave blank normally. client_secret> Remote config -Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine +Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access +If not sure try Y. If Y failed, try N. y) Yes n) No y/n> y @@ -50063,9 +51165,10 @@ Storage> premiumizeme ** See help for premiumizeme backend at: https://rclone.org/premiumizeme/ ** Remote config -Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine +Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access +If not sure try Y. If Y failed, try N. 
y) Yes n) No y/n> y @@ -50251,9 +51354,10 @@ Storage> putio ** See help for putio backend at: https://rclone.org/putio/ ** Remote config -Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine +Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access +If not sure try Y. If Y failed, try N. y) Yes n) No y/n> y @@ -50287,8 +51391,12 @@ e/n/d/r/c/s/q> q \f[R] .fi .PP +See the remote setup docs (https://rclone.org/remote_setup/) for how to +set it up on a machine with no Internet browser available. +.PP Note that rclone runs a webserver on your local machine to collect the -token as returned from Google if you use auto config mode. +token as returned from put.io if using web browser to automatically +authenticate. This only runs from the moment it opens your browser to the moment you get back the verification code. This is on \f[C]http://127.0.0.1:53682/\f[R] and this it may require you @@ -51533,6 +52641,9 @@ diffie-hellman-group-exchange-sha1 Those algorithms are insecure and may allow plaintext data to be recovered by an attacker. .PP +This must be false if you use either ciphers or key_exchange advanced +options. +.PP Properties: .IP \[bu] 2 Config: use_insecure_cipher @@ -51962,6 +53073,84 @@ Env Var: RCLONE_SFTP_SET_ENV Type: SpaceSepList .IP \[bu] 2 Default: +.SS --sftp-ciphers +.PP +Space separated list of ciphers to be used for session encryption, +ordered by preference. +.PP +At least one must match with server configuration. +This can be checked for example using ssh -Q cipher. +.PP +This must not be set if use_insecure_cipher is true. 
+.PP +Example: +.IP +.nf +\f[C] +aes128-ctr aes192-ctr aes256-ctr aes128-gcm\[at]openssh.com aes256-gcm\[at]openssh.com +\f[R] +.fi +.PP +Properties: +.IP \[bu] 2 +Config: ciphers +.IP \[bu] 2 +Env Var: RCLONE_SFTP_CIPHERS +.IP \[bu] 2 +Type: SpaceSepList +.IP \[bu] 2 +Default: +.SS --sftp-key-exchange +.PP +Space separated list of key exchange algorithms, ordered by preference. +.PP +At least one must match with server configuration. +This can be checked for example using ssh -Q kex. +.PP +This must not be set if use_insecure_cipher is true. +.PP +Example: +.IP +.nf +\f[C] +sntrup761x25519-sha512\[at]openssh.com curve25519-sha256 curve25519-sha256\[at]libssh.org ecdh-sha2-nistp256 +\f[R] +.fi +.PP +Properties: +.IP \[bu] 2 +Config: key_exchange +.IP \[bu] 2 +Env Var: RCLONE_SFTP_KEY_EXCHANGE +.IP \[bu] 2 +Type: SpaceSepList +.IP \[bu] 2 +Default: +.SS --sftp-macs +.PP +Space separated list of MACs (message authentication code) algorithms, +ordered by preference. +.PP +At least one must match with server configuration. +This can be checked for example using ssh -Q mac. +.PP +Example: +.IP +.nf +\f[C] +umac-64-etm\[at]openssh.com umac-128-etm\[at]openssh.com hmac-sha2-256-etm\[at]openssh.com +\f[R] +.fi +.PP +Properties: +.IP \[bu] 2 +Config: macs +.IP \[bu] 2 +Env Var: RCLONE_SFTP_MACS +.IP \[bu] 2 +Type: SpaceSepList +.IP \[bu] 2 +Default: .SS Limitations .PP On some SFTP servers (e.g. @@ -54379,9 +55568,10 @@ client_id> Yandex Client Secret - leave blank normally. client_secret> Remote config -Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine +Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access +If not sure try Y. If Y failed, try N. y) Yes n) No y/n> y @@ -54656,9 +55846,10 @@ y) Yes n) No (default) y/n> n Remote config -Use auto config? 
- * Say Y if not sure - * Say N if you are working on a remote or headless machine +Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access +If not sure try Y. If Y failed, try N. y) Yes (default) n) No y/n> @@ -56024,6 +57215,357 @@ Options: .IP \[bu] 2 \[dq]error\[dq]: return an error based on option value .SH Changelog +.SS v1.61.0 - 2022-12-20 +.PP +See commits (https://github.com/rclone/rclone/compare/v1.60.0...v1.61.0) +.IP \[bu] 2 +New backends +.RS 2 +.IP \[bu] 2 +New S3 providers +.RS 2 +.IP \[bu] 2 +Liara LOS (https://rclone.org/s3/#liara-cloud) (MohammadReza) +.RE +.RE +.IP \[bu] 2 +New Features +.RS 2 +.IP \[bu] 2 +build: Add vulnerability testing using govulncheck (albertony) +.IP \[bu] 2 +cmd: Enable \f[C]SIGINFO\f[R] (Ctrl-T) handler on FreeBSD, NetBSD, +OpenBSD and Dragonfly BSD (x3-apptech) +.IP \[bu] 2 +config: Add config/setpath (https://rclone.org/rc/#config-setpath) for +setting config path via rc/librclone (Nick Craig-Wood) +.IP \[bu] 2 +dedupe +.RS 2 +.IP \[bu] 2 +Count Checks in the stats while scanning for duplicates (Nick +Craig-Wood) +.IP \[bu] 2 +Make dedupe obey the filters (Nick Craig-Wood) +.RE +.IP \[bu] 2 +dlna: Properly attribute code used from https://github.com/anacrolix/dms +(Nick Craig-Wood) +.IP \[bu] 2 +docs +.RS 2 +.IP \[bu] 2 +Add minimum versions and status badges to backend and command docs (Nick +Craig-Wood, albertony) +.IP \[bu] 2 +Remote names may not start or end with space (albertony) +.RE +.IP \[bu] 2 +filter: Add metadata filters +--metadata-include/exclude/filter (https://rclone.org/filtering/#metadata) +and friends (Nick Craig-Wood) +.IP \[bu] 2 +fs +.RS 2 +.IP \[bu] 2 +Make all duration flags take \f[C]y\f[R], \f[C]M\f[R], \f[C]w\f[R], +\f[C]d\f[R] etc suffixes (Nick Craig-Wood) +.IP \[bu] 2 +Add global flag \f[C]--color\f[R] to control terminal colors 
(Kevin +Verstaen) +.RE +.IP \[bu] 2 +fspath: Allow unicode numbers and letters in remote names (albertony) +.IP \[bu] 2 +lib/file: Improve error message for creating dir on non-existent network +host on windows (albertony) +.IP \[bu] 2 +lib/http: Finish port of rclone servers to \f[C]lib/http\f[R] (Tom +Mombourquette, Nick Craig-Wood) +.IP \[bu] 2 +lib/oauthutil: Improved usability of config flows needing web browser +(Ole Frost) +.IP \[bu] 2 +ncdu +.RS 2 +.IP \[bu] 2 +Add support for modification time (albertony) +.IP \[bu] 2 +Fallback to sort by name also for sort by average size (albertony) +.IP \[bu] 2 +Rework to use tcell directly instead of the termbox wrapper (eNV25) +.RE +.IP \[bu] 2 +rc: Add commands to set GC +Percent (https://rclone.org/rc/#debug-set-gc-percent) & Memory Limit (go +1.19+) (Anagh Kumar Baranwal) +.IP \[bu] 2 +rcat: Preserve metadata when Copy falls back to Rcat (Nick Craig-Wood) +.IP \[bu] 2 +rcd: Refactor rclone rc server to use \f[C]lib/http\f[R] (Nick +Craig-Wood) +.IP \[bu] 2 +rcserver: Avoid generating default credentials with htpasswd (Kamui) +.IP \[bu] 2 +restic: Refactor to use \f[C]lib/http\f[R] (Nolan Woods) +.IP \[bu] 2 +serve http: Support unix sockets and multiple listeners (Tom +Mombourquette) +.IP \[bu] 2 +serve webdav: Refactor to use \f[C]lib/http\f[R] (Nick Craig-Wood) +.IP \[bu] 2 +test: Replace defer cleanup with \f[C]t.Cleanup\f[R] (Eng Zer Jun) +.IP \[bu] 2 +test memory: Read metadata if \f[C]-M\f[R] flag is specified (Nick +Craig-Wood) +.IP \[bu] 2 +wasm: Comply with \f[C]wasm_exec.js\f[R] licence terms (Matthew Vernon) +.RE +.IP \[bu] 2 +Bug Fixes +.RS 2 +.IP \[bu] 2 +build: Update \f[C]golang.org/x/net/http2\f[R] to fix GO-2022-1144 (Nick +Craig-Wood) +.IP \[bu] 2 +restic: Fix typo in docs \[aq]remove\[aq] should be \[aq]remote\[aq] +(asdffdsazqqq) +.IP \[bu] 2 +serve dlna: Fix panic: Logger uninitialized. 
+(Nick Craig-Wood) +.RE +.IP \[bu] 2 +Mount +.RS 2 +.IP \[bu] 2 +Update cgofuse for FUSE-T support for mounting volumes on Mac (Nick +Craig-Wood) +.RE +.IP \[bu] 2 +VFS +.RS 2 +.IP \[bu] 2 +Windows: fix slow opening of exe files by not truncating files when not +necessary (Nick Craig-Wood) +.IP \[bu] 2 +Fix IO Error opening a file with \f[C]O_CREATE|O_RDONLY\f[R] in +\f[C]--vfs-cache-mode\f[R] not full (Nick Craig-Wood) +.RE +.IP \[bu] 2 +Crypt +.RS 2 +.IP \[bu] 2 +Fix compress wrapping crypt giving upload errors (Nick Craig-Wood) +.RE +.IP \[bu] 2 +Azure Blob +.RS 2 +.IP \[bu] 2 +Port to new SDK (Nick Craig-Wood) +.RS 2 +.IP \[bu] 2 +Revamp authentication to include all methods and docs (Nick Craig-Wood) +.IP \[bu] 2 +Port old authentication methods to new SDK (Nick Craig-Wood, Brad +Ackerman) +.IP \[bu] 2 +Thanks to Stonebranch (https://www.stonebranch.com/) for sponsoring this +work. +.RE +.IP \[bu] 2 +Add \f[C]--azureblob-no-check-container\f[R] to assume container exists +(Nick Craig-Wood) +.IP \[bu] 2 +Add \f[C]--use-server-modtime\f[R] support (Abdullah Saglam) +.IP \[bu] 2 +Add support for custom upload headers (rkettelerij) +.IP \[bu] 2 +Allow emulator account/key override (Roel Arents) +.IP \[bu] 2 +Support simple \[dq]environment credentials\[dq] (Nathaniel Wesley +Filardo) +.IP \[bu] 2 +Ignore \f[C]AuthorizationFailure\f[R] when trying to create a create a +container (Nick Craig-Wood) +.RE +.IP \[bu] 2 +Box +.RS 2 +.IP \[bu] 2 +Added note on Box API rate limits (Ole Frost) +.RE +.IP \[bu] 2 +Drive +.RS 2 +.IP \[bu] 2 +Handle shared drives with leading/trailing space in name (related to) +(albertony) +.RE +.IP \[bu] 2 +FTP +.RS 2 +.IP \[bu] 2 +Update help text of implicit/explicit TLS options to refer to FTPS +instead of FTP (ycdtosa) +.IP \[bu] 2 +Improve performance to speed up \f[C]--files-from\f[R] and +\f[C]NewObject\f[R] (Anthony Pessy) +.RE +.IP \[bu] 2 +HTTP +.RS 2 +.IP \[bu] 2 +Parse GET responses when \f[C]no_head\f[R] is set (Arnie97) +.IP 
\[bu] 2 +Do not update object size based on \f[C]Range\f[R] requests (Arnie97) +.IP \[bu] 2 +Support \f[C]Content-Range\f[R] response header (Arnie97) +.RE +.IP \[bu] 2 +Onedrive +.RS 2 +.IP \[bu] 2 +Document workaround for shared with me files (vanplus) +.RE +.IP \[bu] 2 +S3 +.RS 2 +.IP \[bu] 2 +Add Liara LOS to provider list (MohammadReza) +.IP \[bu] 2 +Add DigitalOcean Spaces regions \f[C]sfo3\f[R], \f[C]fra1\f[R], +\f[C]syd1\f[R] (Jack) +.IP \[bu] 2 +Avoid privileged \f[C]GetBucketLocation\f[R] to resolve s3 region +(Anthony Pessy) +.IP \[bu] 2 +Stop setting object and bucket ACL to \f[C]private\f[R] if it is an +empty string (Philip Harvey) +.IP \[bu] 2 +If bucket or object ACL is empty string then don\[aq]t add +\f[C]X-Amz-Acl:\f[R] header (Nick Craig-Wood) +.IP \[bu] 2 +Reduce memory consumption for s3 objects (Erik Agterdenbos) +.IP \[bu] 2 +Fix listing loop when using v2 listing on v1 server (Nick Craig-Wood) +.IP \[bu] 2 +Fix nil pointer exception when using Versions (Nick Craig-Wood) +.IP \[bu] 2 +Fix excess memory usage when using versions (Nick Craig-Wood) +.IP \[bu] 2 +Ignore versionIDs from uploads unless using \f[C]--s3-versions\f[R] or +\f[C]--s3-versions-at\f[R] (Nick Craig-Wood) +.RE +.IP \[bu] 2 +SFTP +.RS 2 +.IP \[bu] 2 +Add configuration options to set ssh Ciphers / MACs / KeyExchange +(dgouju) +.IP \[bu] 2 +Auto-detect shell type for fish (albertony) +.IP \[bu] 2 +Fix NewObject with leading / (Nick Craig-Wood) +.RE +.IP \[bu] 2 +Smb +.RS 2 +.IP \[bu] 2 +Fix issue where spurious dot directory is created (albertony) +.RE +.IP \[bu] 2 +Storj +.RS 2 +.IP \[bu] 2 +Implement server side Copy (Kaloyan Raev) +.RE +.SS v1.60.1 - 2022-11-17 +.PP +See commits (https://github.com/rclone/rclone/compare/v1.60.0...v1.60.1) +.IP \[bu] 2 +Bug Fixes +.RS 2 +.IP \[bu] 2 +lib/cache: Fix alias backend shutting down too soon (Nick Craig-Wood) +.IP \[bu] 2 +wasm: Fix walltime link error by adding up-to-date wasm_exec.js +(Jo\[~a]o Henrique Franco) +.IP \[bu] 2 
+docs +.RS 2 +.IP \[bu] 2 +Update faq.md with bisync (Samuel Johnson) +.IP \[bu] 2 +Corrected download links in windows install docs (coultonluke) +.IP \[bu] 2 +Add direct download link for windows arm64 (albertony) +.IP \[bu] 2 +Remove link to rclone slack as it is no longer supported (Nick +Craig-Wood) +.IP \[bu] 2 +Faq: how to use a proxy server that requires a username and password +(asdffdsazqqq) +.IP \[bu] 2 +Oracle-object-storage: doc fix (Manoj Ghosh) +.IP \[bu] 2 +Fix typo \f[C]remove\f[R] in rclone_serve_restic command (Joda +St\[:o]\[ss]er) +.IP \[bu] 2 +Fix character that was incorrectly interpreted as markdown (Cl\['e]ment +Notin) +.RE +.RE +.IP \[bu] 2 +VFS +.RS 2 +.IP \[bu] 2 +Fix deadlock caused by cache cleaner and upload finishing (Nick +Craig-Wood) +.RE +.IP \[bu] 2 +Local +.RS 2 +.IP \[bu] 2 +Clean absolute paths (albertony) +.IP \[bu] 2 +Fix -L/--copy-links with filters missing directories (Nick Craig-Wood) +.RE +.IP \[bu] 2 +Mailru +.RS 2 +.IP \[bu] 2 +Note that an app password is now needed (Nick Craig-Wood) +.IP \[bu] 2 +Allow timestamps to be before the epoch 1970-01-01 (Nick Craig-Wood) +.RE +.IP \[bu] 2 +S3 +.RS 2 +.IP \[bu] 2 +Add provider quirk \f[C]--s3-might-gzip\f[R] to fix corrupted on +transfer: sizes differ (Nick Craig-Wood) +.IP \[bu] 2 +Allow Storj to server side copy since it seems to work now (Nick +Craig-Wood) +.IP \[bu] 2 +Fix for unchecked err value in s3 listv2 (Aaron Gokaslan) +.IP \[bu] 2 +Add additional Wasabi locations (techknowlogick) +.RE +.IP \[bu] 2 +Smb +.RS 2 +.IP \[bu] 2 +Fix \f[C]Failed to sync: context canceled\f[R] at the end of syncs (Nick +Craig-Wood) +.RE +.IP \[bu] 2 +WebDAV +.RS 2 +.IP \[bu] 2 +Fix Move/Copy/DirMove when using --server-side-across-configs (Nick +Craig-Wood) +.RE .SS v1.60.0 - 2022-10-21 .PP See commits (https://github.com/rclone/rclone/compare/v1.59.0...v1.60.0) @@ -67037,10 +68579,8 @@ significant amount of metadata, which breaks the desired 1:1 mapping of files to objects. 
.SS Can rclone do bi-directional sync? .PP -No, not at present. -rclone only does uni-directional sync from A -> B. -It may do in the future though since it has all the primitives - it just -requires writing the algorithm to do it. +Yes, since rclone v1.58.0, bidirectional cloud +sync (https://rclone.org/bisync/) is available. .SS Can I use rclone with an HTTP proxy? .PP Yes. @@ -67073,6 +68613,17 @@ export HTTPS_PROXY=$http_proxy \f[R] .fi .PP +Note: If the proxy server requires a username and password, then use +.IP +.nf +\f[C] +export http_proxy=http://username:password\[at]proxyserver:12345 +export https_proxy=$http_proxy +export HTTP_PROXY=$http_proxy +export HTTPS_PROXY=$http_proxy +\f[R] +.fi +.PP The \f[C]NO_PROXY\f[R] allows you to disable the proxy for specific hosts. Hosts must be comma separated, and can contain domains or parts. @@ -67691,6 +69242,8 @@ andrea rota .IP \[bu] 2 nicolov .IP \[bu] 2 +Matt Joiner +.IP \[bu] 2 Dario Guzik .IP \[bu] 2 qip @@ -68534,6 +70087,50 @@ Manoj Ghosh Tom Mombourquette .IP \[bu] 2 Robert Newson +.IP \[bu] 2 +Samuel Johnson +.IP \[bu] 2 +coultonluke +.IP \[bu] 2 +Anthony Pessy +.IP \[bu] 2 +Philip Harvey +.IP \[bu] 2 +dgouju +.IP \[bu] 2 +Cl\['e]ment Notin +.IP \[bu] 2 +x3-apptech <66947598+x3-apptech@users.noreply.github.com> +.IP \[bu] 2 +Arnie97 +.IP \[bu] 2 +Roel Arents <2691308+roelarents@users.noreply.github.com> +.IP \[bu] 2 +Aaron Gokaslan +.IP \[bu] 2 +techknowlogick +.IP \[bu] 2 +rkettelerij +.IP \[bu] 2 +Kamui +.IP \[bu] 2 +asdffdsazqqq <90116442+asdffdsazqqq@users.noreply.github.com> +.IP \[bu] 2 +Nathaniel Wesley Filardo +.IP \[bu] 2 +ycdtosa +.IP \[bu] 2 +Erik Agterdenbos +.IP \[bu] 2 +Kevin Verstaen <48050031+kverstae@users.noreply.github.com> +.IP \[bu] 2 +MohammadReza +.IP \[bu] 2 +vanplus <60313789+vanplus@users.noreply.github.com> +.IP \[bu] 2 +Jack <16779171+jkpe@users.noreply.github.com> +.IP \[bu] 2 +Abdullah Saglam .SH Contact the rclone project .SS Forum .PP