From fb169a8b54e7fc7684aabbd5ba8d4988a766028b Mon Sep 17 00:00:00 2001 From: Martin Michlmayr Date: Tue, 19 May 2020 19:02:44 +0800 Subject: [PATCH] doc: fix typos throughout docs --- backend/qingstor/qingstor.go | 4 ++-- cmd/hashsum/hashsum.go | 2 +- cmd/move/move.go | 2 +- cmd/serve/httplib/httplib.go | 2 +- docs/content/alias.md | 4 ++-- docs/content/amazonclouddrive.md | 2 +- docs/content/azureblob.md | 2 +- docs/content/cache.md | 10 +++++----- docs/content/changelog.md | 22 +++++++++++----------- docs/content/chunker.md | 4 ++-- docs/content/crypt.md | 6 +++--- docs/content/docs.md | 14 +++++++------- docs/content/drive.md | 6 +++--- docs/content/fichier.md | 2 +- docs/content/filtering.md | 2 +- docs/content/flags.md | 2 +- docs/content/ftp.md | 2 +- docs/content/googlephotos.md | 2 +- docs/content/gui.md | 6 +++--- docs/content/install.sh | 2 +- docs/content/jottacloud.md | 2 +- docs/content/local.md | 2 +- docs/content/mailru.md | 2 +- docs/content/mega.md | 4 ++-- docs/content/onedrive.md | 2 +- docs/content/qingstor.md | 6 +++--- docs/content/rc.md | 4 ++-- docs/content/s3.md | 2 +- docs/content/swift.md | 2 +- docs/content/union.md | 4 ++-- docs/content/webdav.md | 2 +- fs/accounting/token_bucket.go | 2 +- 32 files changed, 66 insertions(+), 66 deletions(-) diff --git a/backend/qingstor/qingstor.go b/backend/qingstor/qingstor.go index 037a0d42b..6e58346a9 100644 --- a/backend/qingstor/qingstor.go +++ b/backend/qingstor/qingstor.go @@ -56,7 +56,7 @@ func init() { Help: "QingStor Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials.", }, { Name: "endpoint", - Help: "Enter a endpoint URL to connection QingStor API.\nLeave blank will use the default value \"https://qingstor.com:443\"", + Help: "Enter an endpoint URL to connection QingStor API.\nLeave blank will use the default value \"https://qingstor.com:443\"", }, { Name: "zone", Help: "Zone to connect to.\nDefault is \"pek3a\".", @@ -206,7 +206,7 @@ func (o *Object) 
split() (bucket, bucketPath string) { // Split an URL into three parts: protocol host and port func qsParseEndpoint(endpoint string) (protocol, host, port string, err error) { /* - Pattern to match a endpoint, + Pattern to match an endpoint, eg: "http(s)://qingstor.com:443" --> "http(s)", "qingstor.com", 443 "http(s)//qingstor.com" --> "http(s)", "qingstor.com", "" "qingstor.com" --> "", "qingstor.com", "" diff --git a/cmd/hashsum/hashsum.go b/cmd/hashsum/hashsum.go index 19c3aeaed..1994b1935 100644 --- a/cmd/hashsum/hashsum.go +++ b/cmd/hashsum/hashsum.go @@ -25,7 +25,7 @@ func init() { var commandDefinition = &cobra.Command{ Use: "hashsum remote:path", - Short: `Produces an hashsum file for all the objects in the path.`, + Short: `Produces a hashsum file for all the objects in the path.`, Long: ` Produces a hash file for all the objects in the path using the hash named. The output is in the same format as the standard diff --git a/cmd/move/move.go b/cmd/move/move.go index 80f96c201..66dbf9a7b 100644 --- a/cmd/move/move.go +++ b/cmd/move/move.go @@ -33,7 +33,7 @@ the remote does not support a server side directory move operation. If no filters are in use and if possible this will server side move ` + "`source:path`" + ` into ` + "`dest:path`" + `. After this ` + "`source:path`" + ` will no -longer longer exist. +longer exist. Otherwise for each file in ` + "`source:path`" + ` selected by the filters (if any) this will move it into ` + "`dest:path`" + `. If possible a server side diff --git a/cmd/serve/httplib/httplib.go b/cmd/serve/httplib/httplib.go index 1bda5b91d..4e19c14d2 100644 --- a/cmd/serve/httplib/httplib.go +++ b/cmd/serve/httplib/httplib.go @@ -103,7 +103,7 @@ https. You will need to supply the --cert and --key flags. If you wish to do client side certificate validation then you will need to supply --client-ca also. 
---cert should be a either a PEM encoded certificate or a concatenation +--cert should be either a PEM encoded certificate or a concatenation of that with the CA certificate. --key should be the PEM encoded private key and --client-ca should be the PEM encoded client certificate authority certificate. diff --git a/docs/content/alias.md b/docs/content/alias.md index 83738a7a0..d47b32d90 100644 --- a/docs/content/alias.md +++ b/docs/content/alias.md @@ -15,7 +15,7 @@ eg `remote:directory/subdirectory` or `/directory/subdirectory`. During the initial setup with `rclone config` you will specify the target remote. The target remote can either be a local path or another remote. -Subfolders can be used in target remote. Assume a alias remote named `backup` +Subfolders can be used in target remote. Assume an alias remote named `backup` with the target `mydrive:private/backup`. Invoking `rclone mkdir backup:desktop` is exactly the same as invoking `rclone mkdir mydrive:private/backup/desktop`. @@ -25,7 +25,7 @@ Invoking `rclone mkdir backup:../desktop` is exactly the same as invoking The empty path is not allowed as a remote. To alias the current directory use `.` instead. -Here is an example of how to make a alias called `remote` for local folder. +Here is an example of how to make an alias called `remote` for local folder. First run: rclone config diff --git a/docs/content/amazonclouddrive.md b/docs/content/amazonclouddrive.md index 77fbb1fc4..9d69390fa 100644 --- a/docs/content/amazonclouddrive.md +++ b/docs/content/amazonclouddrive.md @@ -38,7 +38,7 @@ which pass through it. Since rclone doesn't currently have its own Amazon Drive credentials so you will either need to have your own `client_id` and -`client_secret` with Amazon Drive, or use a a third party ouath proxy +`client_secret` with Amazon Drive, or use a third party oauth proxy in which case you will need to enter `client_id`, `client_secret`, `auth_url` and `token_url`. 
diff --git a/docs/content/azureblob.md b/docs/content/azureblob.md index 5f995cd8d..b13a2c4b4 100644 --- a/docs/content/azureblob.md +++ b/docs/content/azureblob.md @@ -301,7 +301,7 @@ MD5 sums are only uploaded with chunked files if the source has an MD5 sum. This will always be the case for a local to azure copy. ### Azure Storage Emulator Support ### -You can test rlcone with storage emulator locally, to do this make sure azure storage emulator +You can test rclone with storage emulator locally, to do this make sure azure storage emulator installed locally and set up a new remote with `rclone config` follow instructions described in introduction, set `use_emulator` config as `true`, you do not need to provide default account name or key if using emulator. diff --git a/docs/content/cache.md b/docs/content/cache.md index e3cfa0c8d..448b787bc 100644 --- a/docs/content/cache.md +++ b/docs/content/cache.md @@ -185,10 +185,10 @@ Affected settings: ##### Certificate Validation ##### When the Plex server is configured to only accept secure connections, it is -possible to use `.plex.direct` URL's to ensure certificate validation succeeds. -These URL's are used by Plex internally to connect to the Plex server securely. +possible to use `.plex.direct` URLs to ensure certificate validation succeeds. +These URLs are used by Plex internally to connect to the Plex server securely. -The format for this URL's is the following: +The format for these URLs is the following: https://ip-with-dots-replaced.server-hash.plex.direct:32400/ @@ -241,7 +241,7 @@ there is a valid concern that the expiring cache listings can lead to cloud prov throttles or bans due to repeated queries on it for very large mounts. 
Some recommendations: -- don't use a very small interval for entry informations (`--cache-info-age`) +- don't use a very small interval for entry information (`--cache-info-age`) - while writes aren't yet optimised, you can still write through `cache` which gives you the advantage of adding the file in the cache at the same time if configured to do so. @@ -405,7 +405,7 @@ The plex token for authentication - auto set normally #### --cache-plex-insecure -Skip all certificate verifications when connecting to the Plex server +Skip all certificate verification when connecting to the Plex server - Config: plex_insecure - Env Var: RCLONE_CACHE_PLEX_INSECURE diff --git a/docs/content/changelog.md b/docs/content/changelog.md index 642e0ec95..85c03504b 100644 --- a/docs/content/changelog.md +++ b/docs/content/changelog.md @@ -189,7 +189,7 @@ date: "2020-02-01" * copyurl * Add `--auto-filename` flag for using file name from URL in destination path (Denis) * serve dlna: - * Many compatability improvements (Dan Walters) + * Many compatibility improvements (Dan Walters) * Support for external srt subtitles (Dan Walters) * rc * Added command core/quit (Saksham Khanna) @@ -220,7 +220,7 @@ date: "2020-02-01" * filter: Prevent mixing options when `--files-from` is in use (Michele Caci) * serve sftp: Fix crash on unsupported operations (eg Readlink) (Nick Craig-Wood) * Mount - * Allow files of unkown size to be read properly (Nick Craig-Wood) + * Allow files of unknown size to be read properly (Nick Craig-Wood) * Skip tests on <= 2 CPUs to avoid lockup (Nick Craig-Wood) * Fix panic on File.Open (Nick Craig-Wood) * Fix "mount_fusefs: -o timeout=: option not supported" on FreeBSD (Nick Craig-Wood) @@ -259,7 +259,7 @@ date: "2020-02-01" * Fix signature v2_auth headers (Anthony Rusdi) * Fix encoding for control characters (Nick Craig-Wood) * Only ask for URL encoded directory listings if we need them on Ceph (Nick Craig-Wood) - * Add option for multipart failiure behaviour (Aleksandar 
Jankovic) + * Add option for multipart failure behaviour (Aleksandar Jankovic) * Support for multipart copy (庄天翼) * Fix nil pointer reference if no metadata returned for object (Nick Craig-Wood) * SFTP @@ -695,7 +695,7 @@ date: "2020-02-01" * Jottacloud * Resume and deduplication support (Oliver Heyme) * Use token auth for all API requests Don't store password anymore (Sebastian Bünger) - * Add support for 2-factor authentification (Sebastian Bünger) + * Add support for 2-factor authentication (Sebastian Bünger) * Mega * Implement v2 account login which fixes logins for newer Mega accounts (Nick Craig-Wood) * Return error if an unknown length file is attempted to be uploaded (Nick Craig-Wood) @@ -710,7 +710,7 @@ date: "2020-02-01" * Default `--qingstor-upload-concurrency` to 1 to work around bug (Nick Craig-Wood) * S3 * Implement `--s3-upload-cutoff` for single part uploads below this (Nick Craig-Wood) - * Change `--s3-upload-concurrency` default to 4 to increase perfomance (Nick Craig-Wood) + * Change `--s3-upload-concurrency` default to 4 to increase performance (Nick Craig-Wood) * Add `--s3-bucket-acl` to control bucket ACL (Nick Craig-Wood) * Auto detect region for buckets on operation failure (Nick Craig-Wood) * Add GLACIER storage class (William Cocker) @@ -892,7 +892,7 @@ date: "2020-02-01" * Add link sharing support (jackyzy823) * S3 * Use custom pacer, to retry operations when reasonable (Craig Miskell) - * Use configured server-side-encryption and storace class options when calling CopyObject() (Paul Kohout) + * Use configured server-side-encryption and storage class options when calling CopyObject() (Paul Kohout) * Make `--s3-v2-auth` flag (Nick Craig-Wood) * Fix v2 auth on files with spaces (Nick Craig-Wood) * Union @@ -1018,8 +1018,8 @@ Point release to fix hubic and azureblob backends. 
* Remove leading / from paths (Nick Craig-Wood) * Swift * Add `storage_policy` (Ruben Vandamme) - * Make it so just `storage_url` or `auth_token` can be overidden (Nick Craig-Wood) - * Fix server side copy bug for unusal file names (Nick Craig-Wood) + * Make it so just `storage_url` or `auth_token` can be overridden (Nick Craig-Wood) + * Fix server side copy bug for unusual file names (Nick Craig-Wood) * Remove leading / from paths (Nick Craig-Wood) * WebDAV * Ensure we call MKCOL with a URL with a trailing / for QNAP interop (Nick Craig-Wood) @@ -1519,7 +1519,7 @@ Point release to fix hubic and azureblob backends. * --old-sync-method deprecated - the remaining uses are covered by --fast-list * This involved a major re-write of all the listing code * Add --tpslimit and --tpslimit-burst to limit transactions per second - * this is useful in conjuction with `rclone mount` to limit external apps + * this is useful in conjunction with `rclone mount` to limit external apps * Add --stats-log-level so can see --stats without -v * Print password prompts to stderr - Hraban Luyat * Warn about duplicate files when syncing @@ -1853,7 +1853,7 @@ Point release to fix hubic and azureblob backends. * Add ap-northeast-2 (Seoul) and ap-south-1 (Mumbai) regions. * Skip setting the modified time for objects > 5GB as it isn't possible. * Backblaze B2 - * Add --b2-versions flag so old versions can be listed and retreived. + * Add --b2-versions flag so old versions can be listed and retrieved. * Treat 403 errors (eg cap exceeded) as fatal. * Implement cleanup command for deleting old file versions. * Make error handling compliant with B2 integrations notes. @@ -1979,7 +1979,7 @@ Point release to fix hubic and azureblob backends. * Add `--delete-before`, `--delete-during`, `--delete-after` flags. * Add `--memprofile` flag to debug memory use. 
* Warn the user about files with same name but different case - * Make `--include` rules add their implict exclude * at the end of the filter list + * Make `--include` rules add their implicit exclude * at the end of the filter list * Deprecate compiling with go1.3 * Amazon Drive * Fix download of files > 10 GB diff --git a/docs/content/chunker.md b/docs/content/chunker.md index 80cb8e415..27dd19a8c 100644 --- a/docs/content/chunker.md +++ b/docs/content/chunker.md @@ -212,7 +212,7 @@ guarantee given hash for all files. If wrapped remote doesn't support it, chunker will then add metadata to all files, even small. However, this can double the amount of small files in storage and incur additional service charges. You can even use chunker to force md5/sha1 support in any other remote -at expence of sidecar meta objects by setting eg. `chunk_type=sha1all` +at expense of sidecar meta objects by setting eg. `chunk_type=sha1all` to force hashsums and `chunk_size=1P` to effectively disable chunking. Normally, when a file is copied to chunker controlled remote, chunker @@ -293,7 +293,7 @@ Chunker will not automatically rename existing chunks when you run Beware that in result of this some files which have been treated as chunks before the change can pop up in directory listings as normal files and vice versa. The same warning holds for the chunk size. -If you desperately need to change critical chunking setings, you should +If you desperately need to change critical chunking settings, you should run data migration as described above. If wrapped remote is case insensitive, the chunker overlay will inherit diff --git a/docs/content/crypt.md b/docs/content/crypt.md index 41bec7ec7..64242cd75 100644 --- a/docs/content/crypt.md +++ b/docs/content/crypt.md @@ -113,7 +113,7 @@ due to the different salt. 
Note that rclone does not encrypt - * file length - this can be calcuated within 16 bytes + * file length - this can be calculated within 16 bytes * modification time - used for syncing ## Specifying the remote ## @@ -367,7 +367,7 @@ names, or for debugging purposes. ## Backing up a crypted remote ## -If you wish to backup a crypted remote, it it recommended that you use +If you wish to backup a crypted remote, it is recommended that you use `rclone sync` on the encrypted files, and make sure the passwords are the same in the new encrypted remote. @@ -451,7 +451,7 @@ files. File names are encrypted segment by segment - the path is broken up into `/` separated strings and these are encrypted individually. -File segments are padded using using PKCS#7 to a multiple of 16 bytes +File segments are padded using PKCS#7 to a multiple of 16 bytes before encryption. They are then encrypted with EME using AES with 256 bit key. EME diff --git a/docs/content/docs.md b/docs/content/docs.md index 3c8f1f0a6..8104fb9a5 100644 --- a/docs/content/docs.md +++ b/docs/content/docs.md @@ -522,7 +522,7 @@ to disable server side move and server side copy use: --disable move,copy -The features can be put in in any case. +The features can be put in any case. To see a list of which features can be disabled use: @@ -794,13 +794,13 @@ Rclone will exit with exit code 8 if the transfer limit is reached. This modifies the behavior of `--max-transfer` Defaults to `--cutoff-mode=hard`. -Specifiying `--cutoff-mode=hard` will stop transferring immediately +Specifying `--cutoff-mode=hard` will stop transferring immediately when Rclone reaches the limit. -Specifiying `--cutoff-mode=soft` will stop starting new transfers +Specifying `--cutoff-mode=soft` will stop starting new transfers when Rclone reaches the limit. -Specifiying `--cutoff-mode=cautious` will try to prevent Rclone +Specifying `--cutoff-mode=cautious` will try to prevent Rclone from reaching the limit. 
### --modify-window=TIME ### @@ -827,7 +827,7 @@ time) then each thread writes directly into the file at the correct place. This means that rclone won't create fragmented or sparse files and there won't be any assembly time at the end of the transfer. -The number of threads used to dowload is controlled by +The number of threads used to download is controlled by `--multi-thread-streams`. Use `-vv` if you wish to see info about the threads. @@ -1230,7 +1230,7 @@ Note also that `--track-renames` is incompatible with ### --track-renames-strategy (hash,modtime) ### This option changes the matching criteria for `--track-renames` to match -by any combination of modtime, hash, size. Matchig by size is always enabled +by any combination of modtime, hash, size. Matching by size is always enabled no matter what option is selected here. This also means that it enables `--track-renames` support for encrypted destinations. If nothing is specified, the default option is matching by hashes. @@ -1377,7 +1377,7 @@ Prints the version number SSL/TLS options --------------- -The outoing SSL/TLS connections rclone makes can be controlled with +The outgoing SSL/TLS connections rclone makes can be controlled with these options. For example this can be very useful with the HTTP or WebDAV backends. Rclone HTTP servers have their own set of configuration for SSL/TLS which you can find in their documentation. diff --git a/docs/content/drive.md b/docs/content/drive.md index cdb291df8..b470204e3 100644 --- a/docs/content/drive.md +++ b/docs/content/drive.md @@ -678,7 +678,7 @@ videos. Setting this flag will cause Google photos and videos to return a blank MD5 checksum. -Google photos are identifed by being in the "photos" space. +Google photos are identified by being in the "photos" space. Corrupted checksums are caused by Google modifying the image/video but not updating the checksum. @@ -877,7 +877,7 @@ Keep new head revision of each file forever. 
Show sizes as storage quota usage, not actual size. -Show the size of a file as the the storage quota used. This is the +Show the size of a file as the storage quota used. This is the current version plus any older versions that have been set to keep forever. @@ -1028,7 +1028,7 @@ etc as rclone knows to ignore the size when doing the transfer. However an unfortunate consequence of this is that you may not be able to download Google docs using `rclone mount`. If it doesn't work you will get a 0 sized file. If you try again the doc may gain its -correct size and be downloadable. Whther it will work on not depends +correct size and be downloadable. Whether it will work or not depends on the application accessing the mount and the OS you are running - experiment to find out if it does work for you! diff --git a/docs/content/fichier.md b/docs/content/fichier.md index b5272831b..8717b20f5 100644 --- a/docs/content/fichier.md +++ b/docs/content/fichier.md @@ -7,7 +7,7 @@ date: "2015-10-14" 1Fichier ----------------------------------------- -This is a backend for the [1ficher](https://1fichier.com) cloud +This is a backend for the [1fichier](https://1fichier.com) cloud storage service. Note that a Premium subscription is required to use the API. diff --git a/docs/content/filtering.md b/docs/content/filtering.md index acab891eb..a97ca8648 100644 --- a/docs/content/filtering.md +++ b/docs/content/filtering.md @@ -308,7 +308,7 @@ This reads a list of file names from the file passed in and **only** these files are transferred. The **filtering rules are ignored** completely if you use this option. -`--files-from` expects a list of files as it's input. Leading / trailing +`--files-from` expects a list of files as its input. Leading / trailing whitespace is stripped from the input lines and lines starting with `#` and `;` are ignored.
diff --git a/docs/content/flags.md b/docs/content/flags.md index 647829fcc..bb090ad18 100755 --- a/docs/content/flags.md +++ b/docs/content/flags.md @@ -344,7 +344,7 @@ and may be set in the config file. --qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M) --qingstor-connection-retries int Number of connection retries. (default 3) --qingstor-encoding MultiEncoder This sets the encoding for the backend. (default Slash,Ctl,InvalidUtf8) - --qingstor-endpoint string Enter a endpoint URL to connection QingStor API. + --qingstor-endpoint string Enter an endpoint URL to connection QingStor API. --qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank. --qingstor-secret-access-key string QingStor Secret Access Key (password) --qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1) diff --git a/docs/content/ftp.md b/docs/content/ftp.md index 4dc738197..8da270ef0 100644 --- a/docs/content/ftp.md +++ b/docs/content/ftp.md @@ -132,7 +132,7 @@ Note that not all FTP servers can have all characters in file names, for example FTP supports implicit FTP over TLS servers (FTPS). This has to be enabled in the config for the remote. The default FTPS port is `990` so the -port will likely have to be explictly set in the config for the remote. +port will likely have to be explicitly set in the config for the remote. ### Standard Options diff --git a/docs/content/googlephotos.md b/docs/content/googlephotos.md index 5163748a6..6ab42611a 100644 --- a/docs/content/googlephotos.md +++ b/docs/content/googlephotos.md @@ -180,7 +180,7 @@ into albums. ``` There are two writable parts of the tree, the `upload` directory and -sub directories of the the `album` directory. +sub directories of the `album` directory. The `upload` directory is for uploading files you don't want to put into albums. 
This will be empty to start with and will contain the diff --git a/docs/content/gui.md b/docs/content/gui.md index 22b3e6aa7..ecaf3b306 100644 --- a/docs/content/gui.md +++ b/docs/content/gui.md @@ -73,7 +73,7 @@ The flag `--rc-web-gui` is shorthand for - `--rc-pass ` - `--rc-serve` -These flags can be overidden as desired. +These flags can be overridden as desired. See also the [rclone rcd documentation](https://rclone.org/commands/rclone_rcd/). @@ -95,14 +95,14 @@ If you want to run the GUI behind a proxy at `/rclone` you could use these flags - `--rc-baseurl rclone` - `--rc-htpasswd /path/to/htpasswd` -Or instead of htpassword if you just want a single user and password: +Or instead of htpasswd if you just want a single user and password: - `--rc-user me` - `--rc-pass mypassword` ## Project -The GUI is being developed in the: [rclone/rclone-webui-react respository](https://github.com/rclone/rclone-webui-react). +The GUI is being developed in the: [rclone/rclone-webui-react repository](https://github.com/rclone/rclone-webui-react). Bug reports and contributions are very welcome :-) diff --git a/docs/content/install.sh b/docs/content/install.sh index 90c400c93..7804627bc 100755 --- a/docs/content/install.sh +++ b/docs/content/install.sh @@ -9,7 +9,7 @@ set -e -#when adding a tool to the list make sure to also add it's corresponding command further in the script +#when adding a tool to the list make sure to also add its corresponding command further in the script unzip_tools_list=('unzip' '7z' 'busybox') usage() { echo "Usage: curl https://rclone.org/install.sh | sudo bash [-s beta]" 1>&2; exit 1; } diff --git a/docs/content/jottacloud.md b/docs/content/jottacloud.md index bed6cda28..acb278625 100644 --- a/docs/content/jottacloud.md +++ b/docs/content/jottacloud.md @@ -161,7 +161,7 @@ as they can't be used in XML strings. ### Deleting files By default rclone will send all files to the trash when deleting files. 
They will be permanently -deleted automatically after 30 days. You may bypass the trash and permanently delete files immeditaly +deleted automatically after 30 days. You may bypass the trash and permanently delete files immediately by using the [--jottacloud-hard-delete](#jottacloud-hard-delete) flag, or set the equivalent environment variable. Emptying the trash is supported by the [cleanup](/commands/rclone_cleanup/) command. diff --git a/docs/content/local.md b/docs/content/local.md index 1e62b5e1c..c27b899e4 100644 --- a/docs/content/local.md +++ b/docs/content/local.md @@ -428,7 +428,7 @@ See: the [encoding section in the overview](/overview/#encoding) for more info. Here are the commands specific to the local backend. -Run them with with +Run them with rclone backend COMMAND remote: diff --git a/docs/content/mailru.md b/docs/content/mailru.md index 1409f3cf9..054f00b0d 100644 --- a/docs/content/mailru.md +++ b/docs/content/mailru.md @@ -18,7 +18,7 @@ Currently it is recommended to disable 2FA on Mail.ru accounts intended for rclo - Deleted files are by default moved to the trash - Files and directories can be shared via public links - Partial uploads or streaming are not supported, file size must be known before upload -- Maximum file size is limited to 2G for a free acount, unlimited for paid accounts +- Maximum file size is limited to 2G for a free account, unlimited for paid accounts - Storage keeps hash for all files and performs transparent deduplication, the hash algorithm is a modified SHA1 - If a particular file is already present in storage, one can quickly submit file hash diff --git a/docs/content/mega.md b/docs/content/mega.md index 3ddd03aa7..bb89acba0 100644 --- a/docs/content/mega.md +++ b/docs/content/mega.md @@ -109,7 +109,7 @@ Use `rclone dedupe` to fix duplicated files. Mega remotes seem to get blocked (reject logins) under "heavy use". 
We haven't worked out the exact blocking rules but it seems to be -related to fast paced, sucessive rclone commands. +related to fast paced, successive rclone commands. For example, executing this command 90 times in a row `rclone link remote:file` will cause the remote to become "blocked". This is not an @@ -140,7 +140,7 @@ approach. Note that once blocked, the use of other tools (such as megacmd) is not a sure workaround: following megacmd login times have been -observed in sucession for blocked remote: 7 minutes, 20 min, 30min, 30 +observed in succession for blocked remote: 7 minutes, 20 min, 30min, 30 min, 30min. Web access looks unaffected though. Investigation is continuing in relation to workarounds based on diff --git a/docs/content/onedrive.md b/docs/content/onedrive.md index c00be83fe..ee0886901 100644 --- a/docs/content/onedrive.md +++ b/docs/content/onedrive.md @@ -132,7 +132,7 @@ Client ID and Key by following the steps below: 2. Enter a name for your app, choose account type `Accounts in any organizational directory (Any Azure AD directory - Multitenant) and personal Microsoft accounts (e.g. Skype, Xbox)`, select `Web` in `Redirect URI` Enter `http://localhost:53682/` and click Register. Copy and keep the `Application (client) ID` under the app name for later use. 3. Under `manage` select `Certificates & secrets`, click `New client secret`. Copy and keep that secret for later use. 4. Under `manage` select `API permissions`, click `Add a permission` and select `Microsoft Graph` then select `delegated permissions`. -5. Search and select the follwing permssions: `Files.Read`, `Files.ReadWrite`, `Files.Read.All`, `Files.ReadWrite.All`, `offline_access`, `User.Read`. Once selected click `Add permissions` at the bottom. +5. Search and select the following permissions: `Files.Read`, `Files.ReadWrite`, `Files.Read.All`, `Files.ReadWrite.All`, `offline_access`, `User.Read`. Once selected click `Add permissions` at the bottom. Now the application is complete. 
Run `rclone config` to create or edit a OneDrive remote. Supply the app ID and password as Client ID and Secret, respectively. rclone will walk you through the remaining steps. diff --git a/docs/content/qingstor.md b/docs/content/qingstor.md index d52769798..ba8b06c7c 100644 --- a/docs/content/qingstor.md +++ b/docs/content/qingstor.md @@ -43,7 +43,7 @@ QingStor Access Key ID - leave blank for anonymous access or runtime credentials access_key_id> access_key QingStor Secret Access Key (password) - leave blank for anonymous access or runtime credentials. secret_access_key> secret_key -Enter a endpoint URL to connection QingStor API. +Enter an endpoint URL to connection QingStor API. Leave blank will use the default value "https://qingstor.com:443" endpoint> Zone connect to. Default is "pek3a". @@ -182,7 +182,7 @@ Leave blank for anonymous access or runtime credentials. #### --qingstor-endpoint -Enter a endpoint URL to connection QingStor API. +Enter an endpoint URL to connection QingStor API. Leave blank will use the default value "https://qingstor.com:443" - Config: endpoint @@ -260,7 +260,7 @@ Concurrency for multipart uploads. This is the number of chunks of the same file that are uploaded concurrently. -NB if you set this to > 1 then the checksums of multpart uploads +NB if you set this to > 1 then the checksums of multipart uploads become corrupted (the uploads themselves are not corrupted though). If you are uploading small numbers of large file over high speed link diff --git a/docs/content/rc.md b/docs/content/rc.md index d698732e0..8352ac2bd 100644 --- a/docs/content/rc.md +++ b/docs/content/rc.md @@ -260,7 +260,7 @@ $ rclone rc job/list ### Assigning operations to groups with _group = -Each rc call has it's own stats group for tracking it's metrics. By default +Each rc call has its own stats group for tracking its metrics. By default grouping is done by the composite group name from prefix `job/` and id of the job like so `job/1`. 
@@ -426,7 +426,7 @@ Eg } -If the rate parameter is not suppied then the bandwidth is queried +If the rate parameter is not supplied then the bandwidth is queried rclone rc core/bwlimit { diff --git a/docs/content/s3.md b/docs/content/s3.md index bbd8fe1cf..d17d00a1e 100644 --- a/docs/content/s3.md +++ b/docs/content/s3.md @@ -1320,7 +1320,7 @@ storage_class = [Spaces](https://www.digitalocean.com/products/object-storage/) is an [S3-interoperable](https://developers.digitalocean.com/documentation/spaces/) object storage service from cloud provider DigitalOcean. -To connect to DigitalOcean Spaces you will need an access key and secret key. These can be retrieved on the "[Applications & API](https://cloud.digitalocean.com/settings/api/tokens)" page of the DigitalOcean control panel. They will be needed when promted by `rclone config` for your `access_key_id` and `secret_access_key`. +To connect to DigitalOcean Spaces you will need an access key and secret key. These can be retrieved on the "[Applications & API](https://cloud.digitalocean.com/settings/api/tokens)" page of the DigitalOcean control panel. They will be needed when prompted by `rclone config` for your `access_key_id` and `secret_access_key`. When prompted for a `region` or `location_constraint`, press enter to use the default value. The region must be included in the `endpoint` setting (e.g. `nyc3.digitaloceanspaces.com`). The default values can be used for other settings. diff --git a/docs/content/swift.md b/docs/content/swift.md index 060afadcb..35541852a 100644 --- a/docs/content/swift.md +++ b/docs/content/swift.md @@ -480,7 +480,7 @@ The modified time is stored as metadata on the object as `X-Object-Meta-Mtime` as floating point since the epoch accurate to 1 ns. -This is a defacto standard (used in the official python-swiftclient +This is a de facto standard (used in the official python-swiftclient amongst others) for storing the modification time for an object. 
### Restricted filename characters diff --git a/docs/content/union.md b/docs/content/union.md index 47138c1ac..b66267498 100644 --- a/docs/content/union.md +++ b/docs/content/union.md @@ -60,7 +60,7 @@ Some policies rely on quota information. These policies should be used only if y | lus, eplus | Used | | lno, eplno | Objects | -To check if your upstream support the field, run `rclone about remote: [flags]` and see if the reuqired field exists. +To check if your upstream supports the field, run `rclone about remote: [flags]` and see if the required field exists. #### Filters @@ -74,7 +74,7 @@ If all remotes are filtered an error will be returned. #### Policy descriptions -THe policies definition are inspired by [trapexit/mergerfs](https://github.com/trapexit/mergerfs) but not exactly the same. Some policy definition could be different due to the much larger latency of remote file systems. +The policy definitions are inspired by [trapexit/mergerfs](https://github.com/trapexit/mergerfs) but not exactly the same. Some policy definitions could be different due to the much larger latency of remote file systems. | Policy | Description | |------------------|------------------------------------------------------------| diff --git a/docs/content/webdav.md b/docs/content/webdav.md index 0ffa99d7e..6f23b8200 100644 --- a/docs/content/webdav.md +++ b/docs/content/webdav.md @@ -222,7 +222,7 @@ First, you need to get your remote's URL: - Now take a look at your address bar, the URL should look like this: `https://[YOUR-DOMAIN]-my.sharepoint.com/personal/[YOUR-EMAIL]/_layouts/15/onedrive.aspx` -You'll only need this URL upto the email address. After that, you'll +You'll only need this URL up to the email address. After that, you'll most likely want to add "/Documents". That subdirectory contains the actual data stored on your OneDrive.
diff --git a/fs/accounting/token_bucket.go b/fs/accounting/token_bucket.go index 7671918c8..13058f68d 100644 --- a/fs/accounting/token_bucket.go +++ b/fs/accounting/token_bucket.go @@ -177,7 +177,7 @@ Eg } -If the rate parameter is not suppied then the bandwidth is queried +If the rate parameter is not supplied then the bandwidth is queried rclone rc core/bwlimit {