diff --git a/backend/amazonclouddrive/amazonclouddrive.go b/backend/amazonclouddrive/amazonclouddrive.go index d93f2acf7..0f89c7eae 100644 --- a/backend/amazonclouddrive/amazonclouddrive.go +++ b/backend/amazonclouddrive/amazonclouddrive.go @@ -83,16 +83,16 @@ func init() { Advanced: true, }, { Name: "upload_wait_per_gb", - Help: `Additional time per GB to wait after a failed complete upload to see if it appears. + Help: `Additional time per GiB to wait after a failed complete upload to see if it appears. Sometimes Amazon Drive gives an error when a file has been fully uploaded but the file appears anyway after a little while. This -happens sometimes for files over 1GB in size and nearly every time for -files bigger than 10GB. This parameter controls the time rclone waits +happens sometimes for files over 1 GiB in size and nearly every time for +files bigger than 10 GiB. This parameter controls the time rclone waits for the file to appear. -The default value for this parameter is 3 minutes per GB, so by -default it will wait 3 minutes for every GB uploaded to see if the +The default value for this parameter is 3 minutes per GiB, so by +default it will wait 3 minutes for every GiB uploaded to see if the file appears. You can disable this feature by setting it to 0. This may cause @@ -112,7 +112,7 @@ in this situation.`, Files this size or more will be downloaded via their "tempLink". This is to work around a problem with Amazon Drive which blocks downloads -of files bigger than about 10GB. The default for this is 9GB which +of files bigger than about 10 GiB. The default for this is 9 GiB which shouldn't need to be changed. To download files above this threshold, rclone requests a "tempLink" diff --git a/backend/azureblob/azureblob.go b/backend/azureblob/azureblob.go index fea7a0459..fcfe9a75e 100644 --- a/backend/azureblob/azureblob.go +++ b/backend/azureblob/azureblob.go @@ -47,8 +47,8 @@ const ( timeFormatIn = time.RFC3339 timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00" storageDefaultBaseURL = "blob.core.windows.net" - defaultChunkSize = 4 * fs.MebiByte - maxChunkSize = 100 * fs.MebiByte + defaultChunkSize = 4 * fs.Mebi + maxChunkSize = 100 * fs.Mebi uploadConcurrency = 4 defaultAccessTier = azblob.AccessTierNone maxTryTimeout = time.Hour * 24 * 365 //max time of an azure web request response window (whether or not data is flowing) @@ -129,11 +129,11 @@ msi_client_id, or msi_mi_res_id parameters.`, Advanced: true, }, { Name: "upload_cutoff", - Help: "Cutoff for switching to chunked upload (<= 256MB). (Deprecated)", + Help: "Cutoff for switching to chunked upload (<= 256 MiB). (Deprecated)", Advanced: true, }, { Name: "chunk_size", - Help: `Upload chunk size (<= 100MB). + Help: `Upload chunk size (<= 100 MiB). 
 Note that this is stored in memory and there may be up to
 "--transfers" chunks stored at once in memory.`,
@@ -404,7 +404,7 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
 }
 
 func checkUploadChunkSize(cs fs.SizeSuffix) error {
-	const minChunkSize = fs.Byte
+	const minChunkSize = fs.SizeSuffixBase
 	if cs < minChunkSize {
 		return errors.Errorf("%s is less than %s", cs, minChunkSize)
 	}
diff --git a/backend/b2/b2.go b/backend/b2/b2.go
index 5d5555602..fb3a8bbc5 100644
--- a/backend/b2/b2.go
+++ b/backend/b2/b2.go
@@ -54,10 +54,10 @@ const (
 	decayConstant = 1 // bigger for slower decay, exponential
 	maxParts      = 10000
 	maxVersions   = 100 // maximum number of versions we search in --b2-versions mode
-	minChunkSize        = 5 * fs.MebiByte
-	defaultChunkSize    = 96 * fs.MebiByte
-	defaultUploadCutoff = 200 * fs.MebiByte
-	largeFileCopyCutoff = 4 * fs.GibiByte // 5E9 is the max
+	minChunkSize        = 5 * fs.Mebi
+	defaultChunkSize    = 96 * fs.Mebi
+	defaultUploadCutoff = 200 * fs.Mebi
+	largeFileCopyCutoff = 4 * fs.Gibi // 5E9 is the max
 	memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
 	memoryPoolUseMmap   = false
 )
@@ -116,7 +116,7 @@ in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration
 
 Files above this size will be uploaded in chunks of "--b2-chunk-size".
 
-This value should be set no larger than 4.657GiB (== 5GB).`,
+This value should be set no larger than 4.657 GiB (== 5 GB).`,
 			Default:  defaultUploadCutoff,
 			Advanced: true,
 		}, {
@@ -126,7 +126,7 @@ This value should be set no larger than 4.657GiB (== 5GB).`,
 Any files larger than this that need to be server-side copied will be
 copied in chunks of this size.
 
-The minimum is 0 and the maximum is 4.6GB.`,
+The minimum is 0 and the maximum is 4.6 GiB.`,
 			Default:  largeFileCopyCutoff,
 			Advanced: true,
 		}, {
diff --git a/backend/b2/upload.go b/backend/b2/upload.go
index f10b0e780..44092dda4 100644
--- a/backend/b2/upload.go
+++ b/backend/b2/upload.go
@@ -230,14 +230,14 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
 	//
 	// The number of bytes in the file being uploaded. Note that
 	// this header is required; you cannot leave it out and just
-	// use chunked encoding.  The minimum size of every part but
-	// the last one is 100MB.
+	// use chunked encoding. The minimum size of every part but
+	// the last one is 100 MB (100,000,000 bytes).
 	//
 	// X-Bz-Content-Sha1
 	//
 	// The SHA1 checksum of this part of the file. B2 will
 	// check this when the part is uploaded, to make sure that the
-	// data arrived correctly.  The same SHA1 checksum must be
+	// data arrived correctly. The same SHA1 checksum must be
 	// passed to b2_finish_large_file.
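 	//
 	// (Illustrative arithmetic, assuming the constants above and that
 	// rclone keeps the configured chunk size: with maxParts = 10000 and
 	// the default 96 MiB chunks, one large upload can cover at most
 	// about 10000 * 96 MiB ≈ 937.5 GiB before --b2-chunk-size must be
 	// increased.)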
opts := rest.Opts{ Method: "POST", diff --git a/backend/box/box.go b/backend/box/box.go index 1ca0a1621..a74d597c5 100644 --- a/backend/box/box.go +++ b/backend/box/box.go @@ -126,7 +126,7 @@ func init() { }}, }, { Name: "upload_cutoff", - Help: "Cutoff for switching to multipart upload (>= 50MB).", + Help: "Cutoff for switching to multipart upload (>= 50 MiB).", Default: fs.SizeSuffix(defaultUploadCutoff), Advanced: true, }, { @@ -1286,7 +1286,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read // upload does a single non-multipart upload // -// This is recommended for less than 50 MB of content +// This is recommended for less than 50 MiB of content func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time, options ...fs.OpenOption) (err error) { upload := api.UploadFile{ Name: o.fs.opt.Enc.FromStandardName(leaf), diff --git a/backend/cache/cache.go b/backend/cache/cache.go index 29b373287..b55fa6061 100644 --- a/backend/cache/cache.go +++ b/backend/cache/cache.go @@ -98,14 +98,14 @@ changed, any downloaded chunks will be invalid and cache-chunk-path will need to be cleared or unexpected EOF errors will occur.`, Default: DefCacheChunkSize, Examples: []fs.OptionExample{{ - Value: "1m", - Help: "1MB", + Value: "1M", + Help: "1 MiB", }, { Value: "5M", - Help: "5 MB", + Help: "5 MiB", }, { Value: "10M", - Help: "10 MB", + Help: "10 MiB", }}, }, { Name: "info_age", @@ -132,13 +132,13 @@ oldest chunks until it goes under this value.`, Default: DefCacheTotalChunkSize, Examples: []fs.OptionExample{{ Value: "500M", - Help: "500 MB", + Help: "500 MiB", }, { Value: "1G", - Help: "1 GB", + Help: "1 GiB", }, { Value: "10G", - Help: "10 GB", + Help: "10 GiB", }}, }, { Name: "db_path", diff --git a/backend/chunker/chunker.go b/backend/chunker/chunker.go index 1d50b5385..d1c1ef205 100644 --- a/backend/chunker/chunker.go +++ b/backend/chunker/chunker.go @@ -155,7 +155,7 @@ Normally should contain a ':' and a path, e.g. "myremote:path/to/dir", }, { Name: "chunk_size", Advanced: false, - Default: fs.SizeSuffix(2147483648), // 2GB + Default: fs.SizeSuffix(2147483648), // 2 GiB Help: `Files larger than chunk size will be split in chunks.`, }, { Name: "name_format", @@ -1448,7 +1448,7 @@ func (c *chunkingReader) dummyRead(in io.Reader, size int64) error { c.accountBytes(size) return nil } - const bufLen = 1048576 // 1MB + const bufLen = 1048576 // 1 MiB buf := make([]byte, bufLen) for size > 0 { n := size diff --git a/backend/chunker/chunker_internal_test.go b/backend/chunker/chunker_internal_test.go index 5b20fcb78..a4c8a34d3 100644 --- a/backend/chunker/chunker_internal_test.go +++ b/backend/chunker/chunker_internal_test.go @@ -33,7 +33,7 @@ func testPutLarge(t *testing.T, f *Fs, kilobytes int) { fstests.TestPutLarge(context.Background(), t, f, &fstest.Item{ ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"), Path: fmt.Sprintf("chunker-upload-%dk", kilobytes), - Size: int64(kilobytes) * int64(fs.KibiByte), + Size: int64(kilobytes) * int64(fs.Kibi), }) }) } diff --git a/backend/compress/compress.go b/backend/compress/compress.go index 88c5afeff..5e9f9b1f9 100644 --- a/backend/compress/compress.go +++ b/backend/compress/compress.go @@ -36,7 +36,7 @@ import ( // Globals const ( initialChunkSize = 262144 // Initial and max sizes of chunks when reading parts of the file. Currently - maxChunkSize = 8388608 // at 256KB and 8 MB. + maxChunkSize = 8388608 // at 256 KiB and 8 MiB. 
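 	// (The two constants below are also byte counts: an 8 MiB buffer and
 	// a 1 MiB sample, the latter presumably used to gauge compressibility.)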
bufferSize = 8388608 heuristicBytes = 1048576 diff --git a/backend/drive/drive.go b/backend/drive/drive.go index 5f20ebabe..a396268f7 100755 --- a/backend/drive/drive.go +++ b/backend/drive/drive.go @@ -68,8 +68,8 @@ const ( defaultScope = "drive" // chunkSize is the size of the chunks created during a resumable upload and should be a power of two. // 1<<18 is the minimum size supported by the Google uploader, and there is no maximum. - minChunkSize = 256 * fs.KibiByte - defaultChunkSize = 8 * fs.MebiByte + minChunkSize = 256 * fs.Kibi + defaultChunkSize = 8 * fs.Mebi partialFields = "id,name,size,md5Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks" listRGrouping = 50 // number of IDs to search at once when using ListR listRInputBuffer = 1000 // size of input buffer when using ListR @@ -467,7 +467,7 @@ See: https://github.com/rclone/rclone/issues/3631 Default: false, Help: `Make upload limit errors be fatal -At the time of writing it is only possible to upload 750GB of data to +At the time of writing it is only possible to upload 750 GiB of data to Google Drive a day (this is an undocumented limit). When this limit is reached Google Drive produces a slightly different error message. When this flag is set it causes these errors to be fatal. These will stop @@ -484,7 +484,7 @@ See: https://github.com/rclone/rclone/issues/3857 Default: false, Help: `Make download limit errors be fatal -At the time of writing it is only possible to download 10TB of data from +At the time of writing it is only possible to download 10 TiB of data from Google Drive a day (this is an undocumented limit). When this limit is reached Google Drive produces a slightly different error message. When this flag is set it causes these errors to be fatal. These will stop diff --git a/backend/dropbox/dropbox.go b/backend/dropbox/dropbox.go index fe0c39d83..3de65d26c 100755 --- a/backend/dropbox/dropbox.go +++ b/backend/dropbox/dropbox.go @@ -65,9 +65,9 @@ const ( // Upload chunk size - setting too small makes uploads slow. // Chunks are buffered into memory for retries. // - // Speed vs chunk size uploading a 1 GB file on 2017-11-22 + // Speed vs chunk size uploading a 1 GiB file on 2017-11-22 // - // Chunk Size MB, Speed Mbyte/s, % of max + // Chunk Size MiB, Speed MiByte/s, % of max // 1 1.364 11% // 2 2.443 19% // 4 4.288 33% @@ -82,11 +82,11 @@ const ( // 96 12.302 95% // 128 12.945 100% // - // Choose 48MB which is 91% of Maximum speed. rclone by - // default does 4 transfers so this should use 4*48MB = 192MB + // Choose 48 MiB which is 91% of Maximum speed. rclone by + // default does 4 transfers so this should use 4*48 MiB = 192 MiB // by default. - defaultChunkSize = 48 * fs.MebiByte - maxChunkSize = 150 * fs.MebiByte + defaultChunkSize = 48 * fs.Mebi + maxChunkSize = 150 * fs.Mebi // Max length of filename parts: https://help.dropbox.com/installs-integrations/sync-uploads/files-not-syncing maxFileNameLength = 255 ) @@ -164,7 +164,7 @@ Any files larger than this will be uploaded in chunks of this size. Note that chunks are buffered in memory (one at a time) so rclone can deal with retries. Setting this larger will increase the speed -slightly (at most 10%% for 128MB in tests) at the cost of using more +slightly (at most 10%% for 128 MiB in tests) at the cost of using more memory. 
 It can be set smaller if you are tight on memory.`, maxChunkSize),
 			Default:  defaultChunkSize,
 			Advanced: true,
@@ -325,7 +325,7 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
 }
 
 func checkUploadChunkSize(cs fs.SizeSuffix) error {
-	const minChunkSize = fs.Byte
+	const minChunkSize = fs.SizeSuffixBase
 	if cs < minChunkSize {
 		return errors.Errorf("%s is less than %s", cs, minChunkSize)
 	}
diff --git a/backend/koofr/koofr.go b/backend/koofr/koofr.go
index fbe09a9db..58eea15f7 100644
--- a/backend/koofr/koofr.go
+++ b/backend/koofr/koofr.go
@@ -534,7 +534,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	return nil
 }
 
-// About reports space usage (with a MB precision)
+// About reports space usage (with MiB precision)
 func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 	mount, err := f.client.MountsDetails(f.mountID)
 	if err != nil {
diff --git a/backend/onedrive/onedrive.go b/backend/onedrive/onedrive.go
index 6bb634024..5c5845ba4 100755
--- a/backend/onedrive/onedrive.go
+++ b/backend/onedrive/onedrive.go
@@ -52,8 +52,8 @@ const (
 	driveTypePersonal   = "personal"
 	driveTypeBusiness   = "business"
 	driveTypeSharepoint = "documentLibrary"
-	defaultChunkSize    = 10 * fs.MebiByte
-	chunkSizeMultiple   = 320 * fs.KibiByte
+	defaultChunkSize    = 10 * fs.Mebi
+	chunkSizeMultiple   = 320 * fs.Kibi
 
 	regionGlobal = "global"
 	regionUS     = "us"
@@ -696,7 +696,7 @@ func errorHandler(resp *http.Response) error {
 }
 
 func checkUploadChunkSize(cs fs.SizeSuffix) error {
-	const minChunkSize = fs.Byte
+	const minChunkSize = fs.SizeSuffixBase
 	if cs%chunkSizeMultiple != 0 {
 		return errors.Errorf("%s is not a multiple of %s", cs, chunkSizeMultiple)
 	}
@@ -1885,11 +1885,11 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
 	return info, nil
 }
 
-// Update the content of a remote file within 4MB size in one single request
+// Update the content of a remote file no larger than 4 MiB in a single request
 // This function will set modtime after uploading, which will create a new version for the remote file
 func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64, modTime time.Time, options ...fs.OpenOption) (info *api.Item, err error) {
 	if size < 0 || size > int64(fs.SizeSuffix(4*1024*1024)) {
-		return nil, errors.New("size passed into uploadSinglepart must be >= 0 and <= 4MiB")
+		return nil, errors.New("size passed into uploadSinglepart must be >= 0 and <= 4 MiB")
 	}
 
 	fs.Debugf(o, "Starting singlepart upload")
diff --git a/backend/opendrive/opendrive.go b/backend/opendrive/opendrive.go
index 982ddc833..75bea04f7 100644
--- a/backend/opendrive/opendrive.go
+++ b/backend/opendrive/opendrive.go
@@ -88,7 +88,7 @@ func init() {
 
 Note that these chunks are buffered in memory so increasing them will
 increase memory use.`,
-			Default:  10 * fs.MebiByte,
+			Default:  10 * fs.Mebi,
 			Advanced: true,
 		}},
 	})
diff --git a/backend/putio/putio.go b/backend/putio/putio.go
index 3d9102d5d..07a6f0236 100644
--- a/backend/putio/putio.go
+++ b/backend/putio/putio.go
@@ -35,7 +35,7 @@ const (
 	minSleep         = 10 * time.Millisecond
 	maxSleep         = 2 * time.Second
 	decayConstant    = 2 // bigger for slower decay, exponential
-	defaultChunkSize = 48 * fs.MebiByte
+	defaultChunkSize = 48 * fs.Mebi
 )
 
 var (
diff --git a/backend/qingstor/qingstor.go b/backend/qingstor/qingstor.go
index 8906b842e..1588a3629 100644
--- a/backend/qingstor/qingstor.go
+++ b/backend/qingstor/qingstor.go
@@ -80,7 +80,7 @@ func init() {
 			Help: `Cutoff for switching to chunked upload
 
 Any files larger
than this will be uploaded in chunks of chunk_size. -The minimum is 0 and the maximum is 5GB.`, +The minimum is 0 and the maximum is 5 GiB.`, Default: defaultUploadCutoff, Advanced: true, }, { diff --git a/backend/s3/s3.go b/backend/s3/s3.go index 92bdb2ccb..d632d83e9 100644 --- a/backend/s3/s3.go +++ b/backend/s3/s3.go @@ -1016,7 +1016,7 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke Help: `Cutoff for switching to chunked upload Any files larger than this will be uploaded in chunks of chunk_size. -The minimum is 0 and the maximum is 5GB.`, +The minimum is 0 and the maximum is 5 GiB.`, Default: defaultUploadCutoff, Advanced: true, }, { @@ -1038,9 +1038,9 @@ Rclone will automatically increase the chunk size when uploading a large file of known size to stay below the 10,000 chunks limit. Files of unknown size are uploaded with the configured -chunk_size. Since the default chunk size is 5MB and there can be at +chunk_size. Since the default chunk size is 5 MiB and there can be at most 10,000 chunks, this means that by default the maximum size of -a file you can stream upload is 48GB. If you wish to stream upload +a file you can stream upload is 48 GiB. If you wish to stream upload larger files then you will need to increase chunk_size.`, Default: minChunkSize, Advanced: true, @@ -1066,7 +1066,7 @@ large file of a known size to stay below this number of chunks limit. Any files larger than this that need to be server-side copied will be copied in chunks of this size. -The minimum is 0 and the maximum is 5GB.`, +The minimum is 0 and the maximum is 5 GiB.`, Default: fs.SizeSuffix(maxSizeForCopy), Advanced: true, }, { @@ -1270,7 +1270,7 @@ See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rcl const ( metaMtime = "Mtime" // the meta key to store mtime in - e.g. X-Amz-Meta-Mtime metaMD5Hash = "Md5chksum" // the meta key to store md5hash in - // The maximum size of object we can COPY - this should be 5GiB but is < 5GB for b2 compatibility + // The maximum size of object we can COPY - this should be 5 GiB but is < 5 GB for b2 compatibility // See https://forum.rclone.org/t/copying-files-within-a-b2-bucket/16680/76 maxSizeForCopy = 4768 * 1024 * 1024 maxUploadParts = 10000 // maximum allowed number of parts in a multi-part upload @@ -2991,9 +2991,9 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si // calculate size of parts partSize := int(f.opt.ChunkSize) - // size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize - // buffers here (default 5MB). With a maximum number of parts (10,000) this will be a file of - // 48GB which seems like a not too unreasonable limit. + // size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize + // buffers here (default 5 MiB). With a maximum number of parts (10,000) this will be a file of + // 48 GiB which seems like a not too unreasonable limit. if size == -1 { warnStreamUpload.Do(func() { fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v", @@ -3002,7 +3002,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si } else { // Adjust partSize until the number of parts is small enough. 
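 		// (Illustrative, assuming uploadParts = 10000: a 100 GiB file gives
 		// size/uploadParts ≈ 10.24 MiB, ">> 20" truncates that to 10, and
 		// "+ 1" followed by "<< 20" rounds the part size up to 11 MiB,
 		// which keeps the upload within the parts limit.)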
if size/int64(partSize) >= uploadParts { - // Calculate partition size rounded up to the nearest MB + // Calculate partition size rounded up to the nearest MiB partSize = int((((size / uploadParts) >> 20) + 1) << 20) } } diff --git a/backend/sharefile/sharefile.go b/backend/sharefile/sharefile.go index 5a160052c..fb33c4c95 100644 --- a/backend/sharefile/sharefile.go +++ b/backend/sharefile/sharefile.go @@ -110,10 +110,10 @@ const ( decayConstant = 2 // bigger for slower decay, exponential apiPath = "/sf/v3" // add to endpoint to get API path tokenPath = "/oauth/token" // add to endpoint to get Token path - minChunkSize = 256 * fs.KibiByte - maxChunkSize = 2 * fs.GibiByte - defaultChunkSize = 64 * fs.MebiByte - defaultUploadCutoff = 128 * fs.MebiByte + minChunkSize = 256 * fs.Kibi + maxChunkSize = 2 * fs.Gibi + defaultChunkSize = 64 * fs.Mebi + defaultUploadCutoff = 128 * fs.Mebi ) // Generate a new oauth2 config which we will update when we know the TokenURL diff --git a/backend/swift/swift.go b/backend/swift/swift.go index 203deae35..10d2bb8e3 100644 --- a/backend/swift/swift.go +++ b/backend/swift/swift.go @@ -36,7 +36,7 @@ import ( const ( directoryMarkerContentType = "application/directory" // content type of directory marker objects listChunks = 1000 // chunk size to read directory listings - defaultChunkSize = 5 * fs.GibiByte + defaultChunkSize = 5 * fs.Gibi minSleep = 10 * time.Millisecond // In case of error, start at 10ms sleep. ) @@ -46,7 +46,7 @@ var SharedOptions = []fs.Option{{ Help: `Above this size files will be chunked into a _segments container. Above this size files will be chunked into a _segments container. The -default for this is 5GB which is its maximum value.`, +default for this is 5 GiB which is its maximum value.`, Default: defaultChunkSize, Advanced: true, }, { @@ -56,7 +56,7 @@ default for this is 5GB which is its maximum value.`, When doing streaming uploads (e.g. using rcat or mount) setting this flag will cause the swift backend to not upload chunked files. -This will limit the maximum upload size to 5GB. However non chunked +This will limit the maximum upload size to 5 GiB. However non chunked files are easier to deal with and have an MD5SUM. 
Rclone will still chunk files bigger than chunk_size when doing normal @@ -419,7 +419,7 @@ func swiftConnection(ctx context.Context, opt *Options, name string) (*swift.Con } func checkUploadChunkSize(cs fs.SizeSuffix) error { - const minChunkSize = fs.Byte + const minChunkSize = fs.SizeSuffixBase if cs < minChunkSize { return errors.Errorf("%s is less than %s", cs, minChunkSize) } diff --git a/backend/swift/swift_test.go b/backend/swift/swift_test.go index cd3632517..4669d9e90 100644 --- a/backend/swift/swift_test.go +++ b/backend/swift/swift_test.go @@ -87,7 +87,7 @@ func (f *Fs) testWithChunk(t *testing.T) { preConfChunkSize := f.opt.ChunkSize preConfChunk := f.opt.NoChunk f.opt.NoChunk = false - f.opt.ChunkSize = 1024 * fs.Byte + f.opt.ChunkSize = 1024 * fs.SizeSuffixBase defer func() { //restore old config after test f.opt.ChunkSize = preConfChunkSize @@ -117,7 +117,7 @@ func (f *Fs) testWithChunkFail(t *testing.T) { preConfChunkSize := f.opt.ChunkSize preConfChunk := f.opt.NoChunk f.opt.NoChunk = false - f.opt.ChunkSize = 1024 * fs.Byte + f.opt.ChunkSize = 1024 * fs.SizeSuffixBase segmentContainer := f.root + "_segments" defer func() { //restore config @@ -159,7 +159,7 @@ func (f *Fs) testCopyLargeObject(t *testing.T) { preConfChunkSize := f.opt.ChunkSize preConfChunk := f.opt.NoChunk f.opt.NoChunk = false - f.opt.ChunkSize = 1024 * fs.Byte + f.opt.ChunkSize = 1024 * fs.SizeSuffixBase defer func() { //restore old config after test f.opt.ChunkSize = preConfChunkSize diff --git a/cmd/delete/delete.go b/cmd/delete/delete.go index 6fdd7be0c..19816cb3a 100644 --- a/cmd/delete/delete.go +++ b/cmd/delete/delete.go @@ -36,8 +36,8 @@ If you supply the |--rmdirs| flag, it will remove all empty directories along wi You can also use the separate command |rmdir| or |rmdirs| to delete empty directories only. -For example, to delete all files bigger than 100MBytes, you may first want to check what -would be deleted (use either): +For example, to delete all files bigger than 100 MiB, you may first want to +check what would be deleted (use either): rclone --min-size 100M lsl remote:path rclone --dry-run --min-size 100M delete remote:path @@ -46,8 +46,8 @@ Then proceed with the actual delete: rclone --min-size 100M delete remote:path -That reads "delete everything with a minimum size of 100 MB", hence -delete all files bigger than 100MBytes. +That reads "delete everything with a minimum size of 100 MiB", hence +delete all files bigger than 100 MiB. **Important**: Since this can cause data loss, test first with the |--dry-run| or the |--interactive|/|-i| flag. diff --git a/cmd/mountlib/mount.go b/cmd/mountlib/mount.go index 34380ccd5..b417223e6 100644 --- a/cmd/mountlib/mount.go +++ b/cmd/mountlib/mount.go @@ -206,9 +206,9 @@ When that happens, it is the user's responsibility to stop the mount manually. The size of the mounted file system will be set according to information retrieved from the remote, the same as returned by the [rclone about](https://rclone.org/commands/rclone_about/) command. Remotes with unlimited storage may report the used size only, -then an additional 1PB of free space is assumed. If the remote does not +then an additional 1 PiB of free space is assumed. If the remote does not [support](https://rclone.org/overview/#optional-features) the about feature -at all, then 1PB is set as both the total and the free size. +at all, then 1 PiB is set as both the total and the free size. 
**Note**: As of |rclone| 1.52.2, |rclone mount| now requires Go version 1.13 or newer on some platforms depending on the underlying FUSE library in use. diff --git a/cmd/ncdu/ncdu.go b/cmd/ncdu/ncdu.go index 49e12a887..b3cce5f69 100644 --- a/cmd/ncdu/ncdu.go +++ b/cmd/ncdu/ncdu.go @@ -385,9 +385,9 @@ func (u *UI) Draw() error { } if u.showDirAverageSize { if averageSize > 0 { - extras += fmt.Sprintf("%8v ", fs.SizeSuffix(int64(averageSize))) + extras += fmt.Sprintf("%9v ", fs.SizeSuffix(int64(averageSize))) } else { - extras += " " + extras += " " } } @@ -406,7 +406,7 @@ func (u *UI) Draw() error { } extras += "[" + graph[graphBars-bars:2*graphBars-bars] + "] " } - Linef(0, y, w, fg, bg, ' ', "%c %8v %s%c%s%s", fileFlag, fs.SizeSuffix(size), extras, mark, path.Base(entry.Remote()), message) + Linef(0, y, w, fg, bg, ' ', "%c %9v %s%c%s%s", fileFlag, fs.SizeSuffix(size), extras, mark, path.Base(entry.Remote()), message) y++ } } diff --git a/cmd/serve/httplib/serve/data/templates/index.html b/cmd/serve/httplib/serve/data/templates/index.html index 1c30fb79b..348050c02 100644 --- a/cmd/serve/httplib/serve/data/templates/index.html +++ b/cmd/serve/httplib/serve/data/templates/index.html @@ -367,7 +367,7 @@ footer { } }; function readableFileSize(size) { - var units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']; + var units = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']; var i = 0; while(size >= 1024) { size /= 1024; diff --git a/cmd/size/size.go b/cmd/size/size.go index 2660a83c4..881d4cace 100644 --- a/cmd/size/size.go +++ b/cmd/size/size.go @@ -44,7 +44,7 @@ var commandDefinition = &cobra.Command{ } fmt.Printf("Total objects: %d\n", results.Count) - fmt.Printf("Total size: %s (%d Bytes)\n", fs.SizeSuffix(results.Bytes).Unit("Bytes"), results.Bytes) + fmt.Printf("Total size: %s (%d bytes)\n", fs.SizeSuffix(results.Bytes).ByteUnit(), results.Bytes) return nil }) diff --git a/docs/content/amazonclouddrive.md b/docs/content/amazonclouddrive.md index a16175408..28d71c08e 100644 --- a/docs/content/amazonclouddrive.md +++ b/docs/content/amazonclouddrive.md @@ -227,16 +227,16 @@ Checkpoint for internal polling (debug). #### --acd-upload-wait-per-gb -Additional time per GB to wait after a failed complete upload to see if it appears. +Additional time per GiB to wait after a failed complete upload to see if it appears. Sometimes Amazon Drive gives an error when a file has been fully uploaded but the file appears anyway after a little while. This -happens sometimes for files over 1GB in size and nearly every time for -files bigger than 10GB. This parameter controls the time rclone waits +happens sometimes for files over 1 GiB in size and nearly every time for +files bigger than 10 GiB. This parameter controls the time rclone waits for the file to appear. -The default value for this parameter is 3 minutes per GB, so by -default it will wait 3 minutes for every GB uploaded to see if the +The default value for this parameter is 3 minutes per GiB, so by +default it will wait 3 minutes for every GiB uploaded to see if the file appears. You can disable this feature by setting it to 0. This may cause @@ -260,7 +260,7 @@ Files >= this size will be downloaded via their tempLink. Files this size or more will be downloaded via their "tempLink". This is to work around a problem with Amazon Drive which blocks downloads -of files bigger than about 10GB. The default for this is 9GB which +of files bigger than about 10 GiB. The default for this is 9 GiB which shouldn't need to be changed. 
 To download files above this threshold, rclone requests a "tempLink"
@@ -299,7 +299,7 @@ Amazon Drive has an internal limit of file sizes that can be uploaded
 to the service. This limit is not officially published, but all files
 larger than this will fail.
 
-At the time of writing (Jan 2016) is in the area of 50GB per file.
+At the time of writing (Jan 2016) this is in the area of 50 GiB per file.
 This means that larger files are likely to fail.
 
 Unfortunately there is no way for rclone to see that this failure is
diff --git a/docs/content/azureblob.md b/docs/content/azureblob.md
index 842e87596..0d255d26f 100644
--- a/docs/content/azureblob.md
+++ b/docs/content/azureblob.md
@@ -269,7 +269,7 @@ Leave blank normally.
 
 #### --azureblob-upload-cutoff
 
-Cutoff for switching to chunked upload (<= 256MB). (Deprecated)
+Cutoff for switching to chunked upload (<= 256 MiB). (Deprecated)
 
 - Config: upload_cutoff
 - Env Var: RCLONE_AZUREBLOB_UPLOAD_CUTOFF
@@ -278,7 +278,7 @@ Cutoff for switching to chunked upload (<= 256MB). (Deprecated)
 
 #### --azureblob-chunk-size
 
-Upload chunk size (<= 100MB).
+Upload chunk size (<= 100 MiB).
 
 Note that this is stored in memory and there may be up to
 "--transfers" chunks stored at once in memory.
diff --git a/docs/content/b2.md b/docs/content/b2.md
index 4967f7415..6314b2478 100644
--- a/docs/content/b2.md
+++ b/docs/content/b2.md
@@ -155,8 +155,8 @@ depending on your hardware, how big the files are, how much you want
 to load your computer, etc. The default of `--transfers 4` is
 definitely too low for Backblaze B2 though.
 
-Note that uploading big files (bigger than 200 MB by default) will use
-a 96 MB RAM buffer by default. There can be at most `--transfers` of
+Note that uploading big files (bigger than 200 MiB by default) will use
+a 96 MiB RAM buffer by default. There can be at most `--transfers` of
 these in use at any moment, so this sets the upper limit on the
 memory used.
 
@@ -401,7 +401,7 @@ Cutoff for switching to chunked upload.
 
 Files above this size will be uploaded in chunks of "--b2-chunk-size".
 
-This value should be set no larger than 4.657GiB (== 5GB).
+This value should be set no larger than 4.657 GiB (== 5 GB).
 
 - Config: upload_cutoff
 - Env Var: RCLONE_B2_UPLOAD_CUTOFF
@@ -415,7 +415,7 @@ Cutoff for switching to multipart copy
 
 Any files larger than this that need to be server-side copied will be
 copied in chunks of this size.
 
-The minimum is 0 and the maximum is 4.6GB.
+The minimum is 0 and the maximum is 4.6 GiB.
 
 - Config: copy_cutoff
 - Env Var: RCLONE_B2_COPY_CUTOFF
diff --git a/docs/content/box.md b/docs/content/box.md
index a1d7710b3..4a175ac85 100644
--- a/docs/content/box.md
+++ b/docs/content/box.md
@@ -225,10 +225,10 @@ as they can't be used in JSON strings.
 
 ### Transfers ###
 
-For files above 50MB rclone will use a chunked transfer. Rclone will
+For files above 50 MiB rclone will use a chunked transfer. Rclone will
 upload up to `--transfers` chunks at the same time (shared among all
 the multipart uploads). Chunks are buffered in memory and are
-normally 8MB so increasing `--transfers` will increase memory use.
+normally 8 MiB so increasing `--transfers` will increase memory use.
 
 ### Deleting files ###
 
@@ -369,7 +369,7 @@ Fill in for rclone to use a non root folder as its starting point.
 
 #### --box-upload-cutoff
 
-Cutoff for switching to multipart upload (>= 50MB).
+Cutoff for switching to multipart upload (>= 50 MiB).
 - Config: upload_cutoff
 - Env Var: RCLONE_BOX_UPLOAD_CUTOFF
diff --git a/docs/content/cache.md b/docs/content/cache.md
index c18ab19e1..6b9e8a3fc 100644
--- a/docs/content/cache.md
+++ b/docs/content/cache.md
@@ -70,11 +70,11 @@ password:
 The size of a chunk. Lower value good for slow connections but can affect seamless reading.
 Default: 5M
 Choose a number from below, or type in your own value
- 1 / 1MB
-   \ "1m"
- 2 / 5 MB
+ 1 / 1 MiB
+   \ "1M"
+ 2 / 5 MiB
    \ "5M"
- 3 / 10 MB
+ 3 / 10 MiB
    \ "10M"
 chunk_size> 2
 How much time should object info (file size, file hashes, etc.) be stored in cache. Use a very high value if you don't plan on changing the source FS from outside the cache.
@@ -91,11 +91,11 @@ info_age> 2
 The maximum size of stored chunks. When the storage grows beyond this size, the oldest chunks will be deleted.
 Default: 10G
 Choose a number from below, or type in your own value
- 1 / 500 MB
+ 1 / 500 MiB
    \ "500M"
- 2 / 1 GB
+ 2 / 1 GiB
    \ "1G"
- 3 / 10 GB
+ 3 / 10 GiB
    \ "10G"
 chunk_total_size> 3
 Remote config
@@ -364,11 +364,11 @@ will need to be cleared or unexpected EOF errors will occur.
 
 - Default: 5M
 - Examples:
     - "1m"
-        - 1MB
+        - 1 MiB
     - "5M"
-        - 5 MB
+        - 5 MiB
     - "10M"
-        - 10 MB
+        - 10 MiB
 
 #### --cache-info-age
 
@@ -401,11 +401,11 @@ oldest chunks until it goes under this value.
 
 - Default: 10G
 - Examples:
     - "500M"
-        - 500 MB
+        - 500 MiB
     - "1G"
-        - 1 GB
+        - 1 GiB
     - "10G"
-        - 10 GB
+        - 10 GiB
 
 ### Advanced Options
 
diff --git a/docs/content/chunker.md b/docs/content/chunker.md
index 710df3a3a..0dfe9b482 100644
--- a/docs/content/chunker.md
+++ b/docs/content/chunker.md
@@ -43,7 +43,7 @@ Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
 Enter a string value. Press Enter for the default ("").
 remote> remote:path
 Files larger than chunk size will be split in chunks.
-Enter a size with suffix k,M,G,T. Press Enter for the default ("2G").
+Enter a size with suffix K,M,G,T. Press Enter for the default ("2G").
 chunk_size> 100M
 Choose how chunker handles hash sums. All modes but "none" require metadata.
 Enter a string value. Press Enter for the default ("md5").
diff --git a/docs/content/commands/rclone_delete.md b/docs/content/commands/rclone_delete.md
index 7514b4e9a..258f7509c 100644
--- a/docs/content/commands/rclone_delete.md
+++ b/docs/content/commands/rclone_delete.md
@@ -23,7 +23,7 @@ If you supply the `--rmdirs` flag, it will remove all empty directories along wi
 You can also use the separate command `rmdir` or `rmdirs` to
 delete empty directories only.
 
-For example, to delete all files bigger than 100MBytes, you may first want to check what
+For example, to delete all files bigger than 100 MiB, you may first want to check what
 would be deleted (use either):
 
     rclone --min-size 100M lsl remote:path
@@ -33,8 +33,8 @@ Then proceed with the actual delete:
 
     rclone --min-size 100M delete remote:path
 
-That reads "delete everything with a minimum size of 100 MB", hence
-delete all files bigger than 100MBytes.
+That reads "delete everything with a minimum size of 100 MiB", hence
+delete all files bigger than 100 MiB.
 
 **Important**: Since this can cause data loss, test first with the
 `--dry-run` or the `--interactive`/`-i` flag.
diff --git a/docs/content/commands/rclone_mount.md b/docs/content/commands/rclone_mount.md
index 511711d35..7d4c36b76 100644
--- a/docs/content/commands/rclone_mount.md
+++ b/docs/content/commands/rclone_mount.md
@@ -56,9 +56,9 @@ When that happens, it is the user's responsibility to stop the mount manually.
 The size of the mounted file system will be set according to information
 retrieved from the remote, the same as returned by the
 [rclone about](https://rclone.org/commands/rclone_about/) command.
 Remotes with unlimited storage may report the used size only,
-then an additional 1PB of free space is assumed. If the remote does not
+then an additional 1 PiB of free space is assumed. If the remote does not
 [support](https://rclone.org/overview/#optional-features) the about feature
-at all, then 1PB is set as both the total and the free size.
+at all, then 1 PiB is set as both the total and the free size.
 
 **Note**: As of `rclone` 1.52.2, `rclone mount` now requires Go version 1.13
 or newer on some platforms depending on the underlying FUSE library in use.
diff --git a/docs/content/crypt.md b/docs/content/crypt.md
index 3055f8ff9..9192139e1 100644
--- a/docs/content/crypt.md
+++ b/docs/content/crypt.md
@@ -627,7 +627,7 @@ approximately 2×10⁻³² of re-using a nonce.
 
 #### Chunk
 
-Each chunk will contain 64kB of data, except for the last one which
+Each chunk will contain 64 KiB of data, except for the last one which
 may have less data. The data chunk is in standard NaCl SecretBox
 format. SecretBox uses XSalsa20 and Poly1305 to encrypt and
 authenticate messages.
@@ -653,12 +653,12 @@ This uses a 32 byte (256 bit key) key derived from the user password.
 
 49 bytes total
 
-1MB (1048576 bytes) file will encrypt to
+1 MiB (1048576 bytes) file will encrypt to
 
 * 32 bytes header
 * 16 chunks of 65568 bytes
 
-1049120 bytes total (a 0.05% overhead).  This is the overhead for big
+1049120 bytes total (a 0.05% overhead). This is the overhead for big
 files.
 
 ### Name encryption
diff --git a/docs/content/docs.md b/docs/content/docs.md
index a382e8db3..ce4c0d52c 100644
--- a/docs/content/docs.md
+++ b/docs/content/docs.md
@@ -421,10 +421,10 @@ possibly signed sequence of decimal numbers, each with optional
 fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid
 time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
 
-Options which use SIZE use kByte by default. However, a suffix of `b`
-for bytes, `k` for kByte, `M` for MByte, `G` for GByte, `T` for
-TByte and `P` for PByte may be used. These are the binary units, e.g.
-1, 2\*\*10, 2\*\*20, 2\*\*30 respectively.
+Options which use SIZE use KiByte (multiples of 1024 bytes) by default.
+However, a suffix of `B` for Byte, `K` for KiByte, `M` for MiByte,
+`G` for GiByte, `T` for TiByte and `P` for PiByte may be used. These are
+the binary units, e.g. 1, 2\*\*10, 2\*\*20, 2\*\*30 respectively.
 
 ### --backup-dir=DIR ###
 
@@ -467,23 +467,23 @@ This option controls the bandwidth limit. For example
 
     --bwlimit 10M
 
-would mean limit the upload and download bandwidth to 10 MByte/s.
+would mean limit the upload and download bandwidth to 10 MiByte/s.
 **NB** this is **bytes** per second not **bits** per second. To use a
-single limit, specify the desired bandwidth in kByte/s, or use a
-suffix b|k|M|G. The default is `0` which means to not limit bandwidth.
+single limit, specify the desired bandwidth in KiByte/s, or use a
+suffix B|K|M|G|T|P. The default is `0` which means to not limit bandwidth.
 
 The upload and download bandwidth can be specified separately, as
 `--bwlimit UP:DOWN`, so
 
     --bwlimit 10M:100k
 
-would mean limit the upload bandwidth to 10 MByte/s and the download
-bandwidth to 100 kByte/s. Either limit can be "off" meaning no limit, so
+would mean limit the upload bandwidth to 10 MiByte/s and the download
+bandwidth to 100 KiByte/s. Either limit can be "off" meaning no limit, so
 to just limit the upload bandwidth you would use
 
     --bwlimit 10M:off
 
-this would limit the upload bandwidth to 10 MByte/s but the download
+this would limit the upload bandwidth to 10 MiByte/s but the download
 bandwidth would be unlimited.
 
 When specified as above the bandwidth limits last for the duration of
@@ -505,19 +505,19 @@ working hours could be:
 
 `--bwlimit "08:00,512k 12:00,10M 13:00,512k 18:00,30M 23:00,off"`
 
-In this example, the transfer bandwidth will be set to 512 kByte/s
-at 8am every day. At noon, it will rise to 10 MByte/s, and drop back
-to 512 kByte/sec at 1pm. At 6pm, the bandwidth limit will be set to
-30 MByte/s, and at 11pm it will be completely disabled (full speed).
+In this example, the transfer bandwidth will be set to 512 KiByte/s
+at 8am every day. At noon, it will rise to 10 MiByte/s, and drop back
+to 512 KiByte/s at 1pm. At 6pm, the bandwidth limit will be set to
+30 MiByte/s, and at 11pm it will be completely disabled (full speed).
 Anything between 11pm and 8am will remain unlimited.
 
 An example of timetable with `WEEKDAY` could be:
 
 `--bwlimit "Mon-00:00,512 Fri-23:59,10M Sat-10:00,1M Sun-20:00,off"`
 
-It means that, the transfer bandwidth will be set to 512 kByte/s on
-Monday. It will rise to 10 MByte/s before the end of Friday. At 10:00
-on Saturday it will be set to 1 MByte/s. From 20:00 on Sunday it will
+It means that the transfer bandwidth will be set to 512 KiByte/s on
+Monday. It will rise to 10 MiByte/s before the end of Friday. At 10:00
+on Saturday it will be set to 1 MiByte/s. From 20:00 on Sunday it will
 be unlimited.
 
 Timeslots without `WEEKDAY` are extended to the whole week. So this
@@ -536,7 +536,7 @@ being the non HTTP backends, `ftp`, `sftp` and `tardigrade`).
 Note that the units are **Byte/s**, not **bit/s**. Typically
 connections are measured in bit/s - to convert divide by 8. For
 example, let's say you have a 10 Mbit/s connection and you wish rclone
-to use half of it - 5 Mbit/s. This is 5/8 = 0.625 MByte/s so you would
+to use half of it - 5 Mbit/s. This is 5/8 = 0.625 MiByte/s so you would
 use a `--bwlimit 0.625M` parameter for rclone.
 
 On Unix systems (Linux, macOS, …) the bandwidth limiter can be toggled by
@@ -557,7 +557,7 @@ change the bwlimit dynamically:
 
 This option controls per file bandwidth limit. For the options see the
 `--bwlimit` flag.
 
-For example use this to allow no transfers to be faster than 1 MByte/s
+For example use this to allow no transfers to be faster than 1 MiByte/s
 
     --bwlimit-file 1M
 
@@ -770,7 +770,7 @@ which feature does what.
 
 This flag can be useful for debugging and in exceptional circumstances
 (e.g. Google Drive limiting the total volume of Server Side Copies to
-100GB/day).
+100 GiB/day).
 
 ### --dscp VALUE ###
 
@@ -1047,7 +1047,7 @@ This is the maximum allowable backlog of files in a sync/copy/move
 queued for being checked or transferred.
 
 This can be set arbitrarily large. It will only use memory when the
-queue is in use. Note that it will use in the order of N kB of memory
+queue is in use. Note that it will use in the order of N KiB of memory
 when the backlog is in use.
 
 Setting this large allows rclone to calculate how many files are
@@ -1176,13 +1176,13 @@ size of the file. To calculate the number of download streams Rclone
 divides the size of the file by the `--multi-thread-cutoff` and rounds
 up, up to the maximum set with `--multi-thread-streams`.
 
-So if `--multi-thread-cutoff 250MB` and `--multi-thread-streams 4` are +So if `--multi-thread-cutoff 250M` and `--multi-thread-streams 4` are in effect (the defaults): -- 0MB..250MB files will be downloaded with 1 stream -- 250MB..500MB files will be downloaded with 2 streams -- 500MB..750MB files will be downloaded with 3 streams -- 750MB+ files will be downloaded with 4 streams +- 0..250 MiB files will be downloaded with 1 stream +- 250..500 MiB files will be downloaded with 2 streams +- 500..750 MiB files will be downloaded with 3 streams +- 750+ MiB files will be downloaded with 4 streams ### --no-check-dest ### diff --git a/docs/content/docs.md.orig b/docs/content/docs.md.orig new file mode 100644 index 000000000..b546af185 --- /dev/null +++ b/docs/content/docs.md.orig @@ -0,0 +1,2188 @@ +--- +title: "Documentation" +description: "Rclone Usage" +--- + +Configure +--------- + +First, you'll need to configure rclone. As the object storage systems +have quite complicated authentication these are kept in a config file. +(See the [`--config`](#config-config-file) entry for how to find the config +file and choose its location.) + +The easiest way to make the config is to run rclone with the config +option: + + rclone config + +See the following for detailed instructions for + + * [1Fichier](/fichier/) + * [Alias](/alias/) + * [Amazon Drive](/amazonclouddrive/) + * [Amazon S3](/s3/) + * [Backblaze B2](/b2/) + * [Box](/box/) + * [Cache](/cache/) + * [Chunker](/chunker/) - transparently splits large files for other remotes + * [Citrix ShareFile](/sharefile/) + * [Compress](/compress/) + * [Crypt](/crypt/) - to encrypt other remotes + * [DigitalOcean Spaces](/s3/#digitalocean-spaces) + * [Dropbox](/dropbox/) + * [Enterprise File Fabric](/filefabric/) + * [FTP](/ftp/) + * [Google Cloud Storage](/googlecloudstorage/) + * [Google Drive](/drive/) + * [Google Photos](/googlephotos/) + * [HDFS](/hdfs/) + * [HTTP](/http/) + * [Hubic](/hubic/) + * [Jottacloud / GetSky.no](/jottacloud/) + * [Koofr](/koofr/) + * [Mail.ru Cloud](/mailru/) + * [Mega](/mega/) + * [Memory](/memory/) + * [Microsoft Azure Blob Storage](/azureblob/) + * [Microsoft OneDrive](/onedrive/) + * [OpenStack Swift / Rackspace Cloudfiles / Memset Memstore](/swift/) + * [OpenDrive](/opendrive/) + * [Pcloud](/pcloud/) + * [premiumize.me](/premiumizeme/) + * [put.io](/putio/) + * [QingStor](/qingstor/) + * [Seafile](/seafile/) + * [SFTP](/sftp/) + * [SugarSync](/sugarsync/) + * [Tardigrade](/tardigrade/) + * [Union](/union/) + * [WebDAV](/webdav/) + * [Yandex Disk](/yandex/) + * [Zoho WorkDrive](/zoho/) + * [The local filesystem](/local/) + +Usage +----- + +Rclone syncs a directory tree from one storage system to another. + +Its syntax is like this + + Syntax: [options] subcommand + +Source and destination paths are specified by the name you gave the +storage system in the config file then the sub path, e.g. +"drive:myfolder" to look at "myfolder" in Google drive. + +You can define as many storage paths as you like in the config file. + +Please use the [`-i` / `--interactive`](#interactive) flag while +learning rclone to avoid accidental data loss. + +Subcommands +----------- + +rclone uses a system of subcommands. 
For example + + rclone ls remote:path # lists a remote + rclone copy /local/path remote:path # copies /local/path to the remote + rclone sync -i /local/path remote:path # syncs /local/path to the remote + +The main rclone commands with most used first + +* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session. +* [rclone copy](/commands/rclone_copy/) - Copy files from source to dest, skipping already copied. +* [rclone sync](/commands/rclone_sync/) - Make source and dest identical, modifying destination only. +* [rclone move](/commands/rclone_move/) - Move files from source to dest. +* [rclone delete](/commands/rclone_delete/) - Remove the contents of path. +* [rclone purge](/commands/rclone_purge/) - Remove the path and all of its contents. +* [rclone mkdir](/commands/rclone_mkdir/) - Make the path if it doesn't already exist. +* [rclone rmdir](/commands/rclone_rmdir/) - Remove the path. +* [rclone rmdirs](/commands/rclone_rmdirs/) - Remove any empty directories under the path. +* [rclone check](/commands/rclone_check/) - Check if the files in the source and destination match. +* [rclone ls](/commands/rclone_ls/) - List all the objects in the path with size and path. +* [rclone lsd](/commands/rclone_lsd/) - List all directories/containers/buckets in the path. +* [rclone lsl](/commands/rclone_lsl/) - List all the objects in the path with size, modification time and path. +* [rclone md5sum](/commands/rclone_md5sum/) - Produce an md5sum file for all the objects in the path. +* [rclone sha1sum](/commands/rclone_sha1sum/) - Produce a sha1sum file for all the objects in the path. +* [rclone size](/commands/rclone_size/) - Return the total size and number of objects in remote:path. +* [rclone version](/commands/rclone_version/) - Show the version number. +* [rclone cleanup](/commands/rclone_cleanup/) - Clean up the remote if possible. +* [rclone dedupe](/commands/rclone_dedupe/) - Interactively find duplicate files and delete/rename them. +* [rclone authorize](/commands/rclone_authorize/) - Remote authorization. +* [rclone cat](/commands/rclone_cat/) - Concatenate any files and send them to stdout. +* [rclone copyto](/commands/rclone_copyto/) - Copy files from source to dest, skipping already copied. +* [rclone genautocomplete](/commands/rclone_genautocomplete/) - Output shell completion scripts for rclone. +* [rclone gendocs](/commands/rclone_gendocs/) - Output markdown docs for rclone to the directory supplied. +* [rclone listremotes](/commands/rclone_listremotes/) - List all the remotes in the config file. +* [rclone mount](/commands/rclone_mount/) - Mount the remote as a mountpoint. +* [rclone moveto](/commands/rclone_moveto/) - Move file or directory from source to dest. +* [rclone obscure](/commands/rclone_obscure/) - Obscure password for use in the rclone.conf +* [rclone cryptcheck](/commands/rclone_cryptcheck/) - Check the integrity of a crypted remote. +* [rclone about](/commands/rclone_about/) - Get quota information from the remote. + +See the [commands index](/commands/) for the full list. + +Copying single files +-------------------- + +rclone normally syncs or copies directories. However, if the source +remote points to a file, rclone will just copy that file. The +destination remote must point to a directory - rclone will give the +error `Failed to create file system for "remote:file": is a file not a +directory` if it isn't. 
+ +For example, suppose you have a remote with a file in called +`test.jpg`, then you could copy just that file like this + + rclone copy remote:test.jpg /tmp/download + +The file `test.jpg` will be placed inside `/tmp/download`. + +This is equivalent to specifying + + rclone copy --files-from /tmp/files remote: /tmp/download + +Where `/tmp/files` contains the single line + + test.jpg + +It is recommended to use `copy` when copying individual files, not `sync`. +They have pretty much the same effect but `copy` will use a lot less +memory. + +Syntax of remote paths +---------------------- + +The syntax of the paths passed to the rclone command are as follows. + +### /path/to/dir + +This refers to the local file system. + +On Windows `\` may be used instead of `/` in local paths **only**, +non local paths must use `/`. See [local filesystem](https://rclone.org/local/#windows-paths) +documentation for more about Windows-specific paths. + +These paths needn't start with a leading `/` - if they don't then they +will be relative to the current directory. + +### remote:path/to/dir + +This refers to a directory `path/to/dir` on `remote:` as defined in +the config file (configured with `rclone config`). + +### remote:/path/to/dir + +On most backends this is refers to the same directory as +`remote:path/to/dir` and that format should be preferred. On a very +small number of remotes (FTP, SFTP, Dropbox for business) this will +refer to a different directory. On these, paths without a leading `/` +will refer to your "home" directory and paths with a leading `/` will +refer to the root. + +### :backend:path/to/dir + +This is an advanced form for creating remotes on the fly. `backend` +should be the name or prefix of a backend (the `type` in the config +file) and all the configuration for the backend should be provided on +the command line (or in environment variables). + +Here are some examples: + + rclone lsd --http-url https://pub.rclone.org :http: + +To list all the directories in the root of `https://pub.rclone.org/`. + + rclone lsf --http-url https://example.com :http:path/to/dir + +To list files and directories in `https://example.com/path/to/dir/` + + rclone copy --http-url https://example.com :http:path/to/dir /tmp/dir + +To copy files and directories in `https://example.com/path/to/dir` to `/tmp/dir`. + + rclone copy --sftp-host example.com :sftp:path/to/dir /tmp/dir + +To copy files and directories from `example.com` in the relative +directory `path/to/dir` to `/tmp/dir` using sftp. + +### Connection strings {#connection-strings} + +The above examples can also be written using a connection string +syntax, so instead of providing the arguments as command line +parameters `--http-url https://pub.rclone.org` they are provided as +part of the remote specification as a kind of connection string. + + rclone lsd ":http,url='https://pub.rclone.org':" + rclone lsf ":http,url='https://example.com':path/to/dir" + rclone copy ":http,url='https://example.com':path/to/dir" /tmp/dir + rclone copy :sftp,host=example.com:path/to/dir /tmp/dir + +These can apply to modify existing remotes as well as create new +remotes with the on the fly syntax. This example is equivalent to +adding the `--drive-shared-with-me` parameter to the remote `gdrive:`. + + rclone lsf "gdrive,shared_with_me:path/to/dir" + +The major advantage to using the connection string style syntax is +that it only applies the the remote, not to all the remotes of that +type of the command line. 
A common confusion is this attempt to copy a +file shared on google drive to the normal drive which **does not +work** because the `--drive-shared-with-me` flag applies to both the +source and the destination. + + rclone copy --drive-shared-with-me gdrive:shared-file.txt gdrive: + +However using the connection string syntax, this does work. + + rclone copy "gdrive,shared_with_me:shared-file.txt" gdrive: + +The connection strings have the following syntax + + remote,parameter=value,parameter2=value2:path/to/dir + :backend,parameter=value,parameter2=value2:path/to/dir + +If the `parameter` has a `:` or `,` then it must be placed in quotes `"` or +`'`, so + + remote,parameter="colon:value",parameter2="comma,value":path/to/dir + :backend,parameter='colon:value',parameter2='comma,value':path/to/dir + +If a quoted value needs to include that quote, then it should be +doubled, so + + remote,parameter="with""quote",parameter2='with''quote':path/to/dir + +This will make `parameter` be `with"quote` and `parameter2` be +`with'quote`. + +If you leave off the `=parameter` then rclone will substitute `=true` +which works very well with flags. For example to use s3 configured in +the environment you could use: + + rclone lsd :s3,env_auth: + +Which is equivalent to + + rclone lsd :s3,env_auth=true: + +Note that on the command line you might need to surround these +connection strings with `"` or `'` to stop the shell interpreting any +special characters within them. + +If you are a shell master then you'll know which strings are OK and +which aren't, but if you aren't sure then enclose them in `"` and use +`'` as the inside quote. This syntax works on all OSes. + + rclone copy ":http,url='https://example.com':path/to/dir" /tmp/dir + +On Linux/macOS some characters are still interpreted inside `"` +strings in the shell (notably `\` and `$` and `"`) so if your strings +contain those you can swap the roles of `"` and `'` thus. (This syntax +does not work on Windows.) + + rclone copy ':http,url="https://example.com":path/to/dir' /tmp/dir + +#### Connection strings, config and logging + +If you supply extra configuration to a backend by command line flag, +environment variable or connection string then rclone will add a +suffix based on the hash of the config to the name of the remote, eg + + rclone -vv lsf --s3-chunk-size 20M s3: + +Has the log message + + DEBUG : s3: detected overridden config - adding "{Srj1p}" suffix to name + +This is so rclone can tell the modified remote apart from the +unmodified remote when caching the backends. + +This should only be noticeable in the logs. + +This means that on the fly backends such as + + rclone -vv lsf :s3,env_auth: + +Will get their own names + + DEBUG : :s3: detected overridden config - adding "{YTu53}" suffix to name + +### Valid remote names + + - Remote names may only contain 0-9, A-Z ,a-z ,_ , - and space. + - Remote names may not start with -. + +Quoting and the shell +--------------------- + +When you are typing commands to your computer you are using something +called the command line shell. This interprets various characters in +an OS specific way. + +Here are some gotchas which may help users unfamiliar with the shell rules + +### Linux / OSX ### + +If your names have spaces or shell metacharacters (e.g. `*`, `?`, `$`, +`'`, `"`, etc.) then you must quote them. Use single quotes `'` by default. + + rclone copy 'Important files?' remote:backup + +If you want to send a `'` you will need to use `"`, e.g. 
+ + rclone copy "O'Reilly Reviews" remote:backup + +The rules for quoting metacharacters are complicated and if you want +the full details you'll have to consult the manual page for your +shell. + +### Windows ### + +If your names have spaces in you need to put them in `"`, e.g. + + rclone copy "E:\folder name\folder name\folder name" remote:backup + +If you are using the root directory on its own then don't quote it +(see [#464](https://github.com/rclone/rclone/issues/464) for why), e.g. + + rclone copy E:\ remote:backup + +Copying files or directories with `:` in the names +-------------------------------------------------- + +rclone uses `:` to mark a remote name. This is, however, a valid +filename component in non-Windows OSes. The remote name parser will +only search for a `:` up to the first `/` so if you need to act on a +file or directory like this then use the full path starting with a +`/`, or use `./` as a current directory prefix. + +So to sync a directory called `sync:me` to a remote called `remote:` use + + rclone sync -i ./sync:me remote:path + +or + + rclone sync -i /full/path/to/sync:me remote:path + +Server Side Copy +---------------- + +Most remotes (but not all - see [the +overview](/overview/#optional-features)) support server-side copy. + +This means if you want to copy one folder to another then rclone won't +download all the files and re-upload them; it will instruct the server +to copy them in place. + +Eg + + rclone copy s3:oldbucket s3:newbucket + +Will copy the contents of `oldbucket` to `newbucket` without +downloading and re-uploading. + +Remotes which don't support server-side copy **will** download and +re-upload in this case. + +Server side copies are used with `sync` and `copy` and will be +identified in the log when using the `-v` flag. The `move` command +may also use them if remote doesn't support server-side move directly. +This is done by issuing a server-side copy then a delete which is much +quicker than a download and re-upload. + +Server side copies will only be attempted if the remote names are the +same. + +This can be used when scripting to make aged backups efficiently, e.g. + + rclone sync -i remote:current-backup remote:previous-backup + rclone sync -i /path/to/files remote:current-backup + +Options +------- + +Rclone has a number of options to control its behaviour. + +Options that take parameters can have the values passed in two ways, +`--option=value` or `--option value`. However boolean (true/false) +options behave slightly differently to the other options in that +`--boolean` sets the option to `true` and the absence of the flag sets +it to `false`. It is also possible to specify `--boolean=false` or +`--boolean=true`. Note that `--boolean false` is not valid - this is +parsed as `--boolean` and the `false` is parsed as an extra command +line argument for rclone. + +Options which use TIME use the go time parser. A duration string is a +possibly signed sequence of decimal numbers, each with optional +fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid +time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". + +Options which use SIZE use kByte by default. However, a suffix of `b` +for bytes, `k` for kBytes, `M` for MBytes, `G` for GBytes, `T` for +TBytes and `P` for PBytes may be used. These are the binary units, e.g. +1, 2\*\*10, 2\*\*20, 2\*\*30 respectively. 
+
+### --backup-dir=DIR ###
+
+When using `sync`, `copy` or `move` any files which would have been
+overwritten or deleted are moved in their original hierarchy into this
+directory.
+
+If `--suffix` is set, then the moved files will have the suffix added
+to them. If there is a file with the same path (after the suffix has
+been added) in DIR, then it will be overwritten.
+
+The remote in use must support server-side move or copy and you must
+use the same remote as the destination of the sync. The backup
+directory must not overlap the destination directory.
+
+For example
+
+    rclone sync -i /path/to/local remote:current --backup-dir remote:old
+
+will sync `/path/to/local` to `remote:current`, but any files which
+would have been updated or deleted will be stored in `remote:old`.
+
+If running rclone from a script you might want to use today's date as
+the directory name passed to `--backup-dir` to store the old files, or
+you might want to pass `--suffix` with today's date.
+
+See `--compare-dest` and `--copy-dest`.
+
+### --bind string ###
+
+Local address to bind to for outgoing connections. This can be an
+IPv4 address (1.2.3.4), an IPv6 address (1234::789A) or host name. If
+the host name doesn't resolve or resolves to more than one IP address
+it will give an error.
+
+### --bwlimit=BANDWIDTH_SPEC ###
+
+This option controls the bandwidth limit. For example
+
+    --bwlimit 10M
+
+would mean limit the upload and download bandwidth to 10 MByte/s.
+**NB** this is **bytes** per second not **bits** per second. To use a
+single limit, specify the desired bandwidth in kByte/s, or use a
+suffix b|k|M|G. The default is `0` which means to not limit bandwidth.
+
+The upload and download bandwidth can be specified separately, as
+`--bwlimit UP:DOWN`, so
+
+    --bwlimit 10M:100k
+
+would mean limit the upload bandwidth to 10 MByte/s and the download
+bandwidth to 100 kByte/s. Either limit can be "off" meaning no limit, so
+to just limit the upload bandwidth you would use
+
+    --bwlimit 10M:off
+
+this would limit the upload bandwidth to 10 MByte/s but the download
+bandwidth would be unlimited.
+
+When specified as above the bandwidth limits last for the duration of
+the run of the rclone binary.
+
+It is also possible to specify a "timetable" of limits, which will
+cause certain limits to be applied at certain times. To specify a
+timetable, format your entries as `WEEKDAY-HH:MM,BANDWIDTH
+WEEKDAY-HH:MM,BANDWIDTH...` where:
+
+- `BANDWIDTH` can be a single number, e.g. `100k` or a pair of numbers
+for upload:download, e.g. `10M:1M`.
+- `WEEKDAY` can be written as the whole word or only using the first 3
+  characters. It is optional.
+- `HH:MM` is an hour from 00:00 to 23:59.
+
+An example of a typical timetable to avoid link saturation during daytime
+working hours could be:
+
+`--bwlimit "08:00,512k 12:00,10M 13:00,512k 18:00,30M 23:00,off"`
+
+In this example, the transfer bandwidth will be set to 512 kByte/s
+at 8am every day. At noon, it will rise to 10 MByte/s, and drop back
+to 512 kByte/s at 1pm. At 6pm, the bandwidth limit will be set to
+30 MByte/s, and at 11pm it will be completely disabled (full speed).
+Anything between 11pm and 8am will remain unlimited.
+
+An example of timetable with `WEEKDAY` could be:
+
+`--bwlimit "Mon-00:00,512 Fri-23:59,10M Sat-10:00,1M Sun-20:00,off"`
+
+It means that the transfer bandwidth will be set to 512 kByte/s on
+Monday. It will rise to 10 MByte/s before the end of Friday.
At 10:00
+on Saturday it will be set to 1 MByte/s. From 20:00 on Sunday it will
+be unlimited.
+
+Timeslots without `WEEKDAY` are extended to the whole week. So this
+example:
+
+`--bwlimit "Mon-00:00,512 12:00,1M Sun-20:00,off"`
+
+Is equivalent to this:
+
+`--bwlimit "Mon-00:00,512 Mon-12:00,1M Tue-12:00,1M Wed-12:00,1M Thu-12:00,1M Fri-12:00,1M Sat-12:00,1M Sun-12:00,1M Sun-20:00,off"`
+
+Bandwidth limits apply to the data transfer for all backends. For most
+backends the directory listing bandwidth is also included (exceptions
+being the non-HTTP backends, `ftp`, `sftp` and `tardigrade`).
+
+Note that the units are **Bytes/s**, not **Bits/s**. Typically
+connections are measured in Bits/s - to convert divide by 8. For
+example, let's say you have a 10 Mbit/s connection and you wish rclone
+to use half of it - 5 Mbit/s. This is 5/8 = 0.625 MByte/s so you would
+use a `--bwlimit 0.625M` parameter for rclone.
+
+On Unix systems (Linux, macOS, …) the bandwidth limiter can be toggled by
+sending a `SIGUSR2` signal to rclone. This allows you to remove the limit
+on a long-running rclone transfer and to restore it to the value specified
+with `--bwlimit` when needed. Assuming there is only one rclone instance
+running, you can toggle the limiter like this:
+
+    kill -SIGUSR2 $(pidof rclone)
+
+If you configure rclone with a [remote control](/rc) then you can use
+it to change the bwlimit dynamically:
+
+    rclone rc core/bwlimit rate=1M
+
+### --bwlimit-file=BANDWIDTH_SPEC ###
+
+This option controls the per file bandwidth limit. For the options see the
+`--bwlimit` flag.
+
+For example, use this to ensure no transfer is faster than 1 MByte/s
+
+    --bwlimit-file 1M
+
+This can be used in conjunction with `--bwlimit`.
+
+Note that if a schedule is provided the file will use the schedule in
+effect at the start of the transfer.
+
+### --buffer-size=SIZE ###
+
+Use this sized buffer to speed up file transfers. Each transfer
+(see `--transfers`) will use this much memory for buffering.
+
+When using `mount` or `cmount` each open file descriptor will use this much
+memory for buffering.
+See the [mount](/commands/rclone_mount/#file-buffering) documentation for more details.
+
+Set to `0` to disable the buffering for the minimum memory usage.
+
+Note that the memory allocation of the buffers is influenced by the
+[--use-mmap](#use-mmap) flag.
+
+### --check-first ###
+
+If this flag is set then in a `sync`, `copy` or `move`, rclone will do
+all the checks to see whether files need to be transferred before
+doing any of the transfers. Normally rclone would start running
+transfers as soon as possible.
+
+This flag can be useful on IO limited systems where transfers
+interfere with checking.
+
+It can also be useful to ensure perfect ordering when using
+`--order-by`.
+
+Using this flag can use more memory as it effectively sets
+`--max-backlog` to infinite. This means that all the info on the
+objects to transfer is held in memory before the transfers start.
+
+### --checkers=N ###
+
+The number of checkers to run in parallel. Checkers do the equality
+checking of files during a sync. For some storage systems (e.g. S3,
+Swift, Dropbox) this can take a significant amount of time so they are
+run in parallel.
+
+The default is to run 8 checkers in parallel.
+
+### -c, --checksum ###
+
+Normally rclone will look at modification time and size of files to
+see if they are equal. If you set this flag then rclone will check
+the file hash and size to determine if files are equal.
+
+This is useful when the remote doesn't support setting modified time
+and a more accurate sync is desired than just checking the file size.
+
+This is very useful when transferring between remotes which store the
+same hash type on the object, e.g. Drive and Swift. For details of which
+remotes support which hash type see the table in the [overview
+section](/overview/).
+
+E.g. `rclone --checksum sync s3:/bucket swift:/bucket` would run much
+quicker than without the `--checksum` flag.
+
+When using this flag, rclone won't update mtimes of remote files if
+they are incorrect as it would normally.
+
+### --compare-dest=DIR ###
+
+When using `sync`, `copy` or `move` DIR is checked in addition to the
+destination for files. If a file identical to the source is found that
+file is NOT copied from source. This is useful to copy just files that
+have changed since the last backup.
+
+You must use the same remote as the destination of the sync. The
+compare directory must not overlap the destination directory.
+
+See `--copy-dest` and `--backup-dir`.
+
+### --config=CONFIG_FILE ###
+
+Specify the location of the rclone configuration file, to override
+the default. E.g. `rclone --config="rclone.conf" config`.
+
+The exact default is a bit complex to describe, due to changes
+introduced through different versions of rclone while preserving
+backwards compatibility, but in most cases it is as simple as:
+
+ - `%APPDATA%/rclone/rclone.conf` on Windows
+ - `~/.config/rclone/rclone.conf` on other operating systems
+
+The complete logic is as follows: Rclone will look for an existing
+configuration file in any of the following locations, in priority order:
+
+  1. `rclone.conf` (in program directory, where rclone executable is)
+  2. `%APPDATA%/rclone/rclone.conf` (only on Windows)
+  3. `$XDG_CONFIG_HOME/rclone/rclone.conf` (on all systems, including Windows)
+  4. `~/.config/rclone/rclone.conf` (see below for explanation of ~ symbol)
+  5. `~/.rclone.conf`
+
+If no existing configuration file is found, then a new one will be created
+in the following location:
+
+- On Windows: Location 2 listed above, except in the unlikely event
+  that `APPDATA` is not defined, then location 4 is used instead.
+- On Unix: Location 3 if `XDG_CONFIG_HOME` is defined, else location 4.
+- Fallback to location 5 (on all OS), when the rclone directory cannot be
+  created, but if also a home directory was not found then path
+  `.rclone.conf` relative to current working directory will be used as
+  a final resort.
+
+The `~` symbol in paths above represents the home directory of the current user
+on any OS, and the value is defined as follows:
+
+  - On Windows: `%HOME%` if defined, else `%USERPROFILE%`, or else `%HOMEDRIVE%\%HOMEPATH%`.
+  - On Unix: `$HOME` if defined, else by looking up current user in OS-specific user database
+    (e.g. passwd file), or else use the result from shell command `cd && pwd`.
+
+If you run `rclone config file` you will see where the default
+location is for you.
+
+The fact that an existing file `rclone.conf` in the same directory
+as the rclone executable is always preferred means that it is easy
+to run in "portable" mode by downloading the rclone executable to a
+writable directory and then creating an empty file `rclone.conf` in the
+same directory.
+
+If the location is set to the empty string `""` or the special value
+`/notfound`, or the OS null device represented by value `NUL` on
+Windows and `/dev/null` on Unix systems, then rclone will keep the
+config file in memory only.
+
+The file format is basic [INI](https://en.wikipedia.org/wiki/INI_file#Format):
+Sections of text, led by a `[section]` header and followed by
+`key=value` entries on separate lines. In rclone each remote is
+represented by its own section, where the section name defines the
+name of the remote. Options are specified as the `key=value` entries,
+where the key is the option name without the `--backend-` prefix,
+in lowercase and with `_` instead of `-`. E.g. option `--mega-hard-delete`
+corresponds to key `hard_delete`. Only backend options can be specified.
+A special, and required, key `type` identifies the [storage system](/overview/),
+where the value is the internal lowercase name as returned by command
+`rclone help backends`. Comments are indicated by `;` or `#` at the
+beginning of a line.
+
+Example:
+
+    [megaremote]
+    type = mega
+    user = you@example.com
+    pass = PDPcQVVjVtzFY-GTdDFozqBhTdsPg3qH
+
+Note that passwords are in [obscured](/commands/rclone_obscure/)
+form. Also, many storage systems use token-based authentication instead
+of passwords, and this requires additional steps. It is easier, and safer,
+to use the interactive command `rclone config` instead of manually
+editing the configuration file.
+
+The configuration file will typically contain login information, and
+should therefore have restricted permissions so that only the current user
+can read it. Rclone tries to ensure this when it writes the file.
+You may also choose to [encrypt](#configuration-encryption) the file.
+
+When token-based authentication is used, the configuration file
+must be writable, because rclone needs to update the tokens inside it.
+
+### --contimeout=TIME ###
+
+Set the connection timeout. This should be in go time format which
+looks like `5s` for 5 seconds, `10m` for 10 minutes, or `3h30m`.
+
+The connection timeout is the amount of time rclone will wait for a
+connection to go through to a remote object storage system. It is
+`1m` by default.
+
+### --copy-dest=DIR ###
+
+When using `sync`, `copy` or `move` DIR is checked in addition to the
+destination for files. If a file identical to the source is found that
+file is server-side copied from DIR to the destination. This is useful
+for incremental backup.
+
+The remote in use must support server-side copy and you must
+use the same remote as the destination of the sync. The compare
+directory must not overlap the destination directory.
+
+See `--compare-dest` and `--backup-dir`.
+
+### --dedupe-mode MODE ###
+
+Mode to run dedupe command in. One of `interactive`, `skip`, `first`, `newest`, `oldest`, `rename`. The default is `interactive`. See the dedupe command for more information as to what these options mean.
+
+### --disable FEATURE,FEATURE,... ###
+
+This disables a comma separated list of optional features. For example
+to disable server-side move and server-side copy use:
+
+    --disable move,copy
+
+The features can be put in any case.
+
+To see a list of which features can be disabled use:
+
+    --disable help
+
+See the overview [features](/overview/#features) and
+[optional features](/overview/#optional-features) to get an idea of
+which feature does what.
+
+This flag can be useful for debugging and in exceptional circumstances
+(e.g. Google Drive limiting the total volume of Server Side Copies to
+100 GB/day).
+
+### --dscp VALUE ###
+
+Specify a DSCP value or name to use in connections. This can help QoS
+systems to identify the traffic class. BE, EF, DF, LE, CSx and AFxx are allowed.
+
+See the description of [differentiated services](https://en.wikipedia.org/wiki/Differentiated_services) to get an idea of
+this field. Setting this to 1 (LE) to mark the flow as SCAVENGER class
+can avoid occupying too much bandwidth in a network with DiffServ support ([RFC 8622](https://tools.ietf.org/html/rfc8622)).
+
+For example, if you have configured QoS on your router to handle LE properly, running:
+```
+rclone copy --dscp LE from:/from to:/to
+```
+would make the priority lower than usual internet flows.
+
+### -n, --dry-run ###
+
+Do a trial run with no permanent changes. Use this to see what rclone
+would do without actually doing it. Useful when setting up the `sync`
+command which deletes files in the destination.
+
+### --expect-continue-timeout=TIME ###
+
+This specifies the amount of time to wait for a server's first
+response headers after fully writing the request headers if the
+request has an "Expect: 100-continue" header. Not all backends support
+using this.
+
+Zero means no timeout and causes the body to be sent immediately,
+without waiting for the server to approve. This time does not include
+the time to send the request header.
+
+The default is `1s`. Set to `0` to disable.
+
+### --error-on-no-transfer ###
+
+By default, rclone will exit with return code 0 if there were no errors.
+
+This option allows rclone to return exit code 9 if no files were transferred
+between the source and destination. This allows using rclone in scripts, and
+triggering follow-on actions if data was copied, or skipping if not.
+
+NB: Enabling this option turns a usually non-fatal error into a potentially
+fatal one - please check and adjust your scripts accordingly!
+
+### --fs-cache-expire-duration=TIME
+
+When using rclone via the API rclone caches created remotes for 5
+minutes by default in the "fs cache". This means that if you do
+repeated actions on the same remote then rclone won't have to build it
+again from scratch, which makes it more efficient.
+
+This flag sets the time that the remotes are cached for. If you set it
+to `0` (or negative) then rclone won't cache the remotes at all.
+
+Note that if you use some flags, e.g. `--backup-dir`, and if this is set
+to `0`, rclone may build two remotes (one for the source or destination
+and one for the `--backup-dir`) where it may have only built one
+before.
+
+### --fs-cache-expire-interval=TIME
+
+This controls how often rclone checks for cached remotes to expire.
+See the `--fs-cache-expire-duration` documentation above for more
+info. The default is 60s, set to 0 to disable expiry.
+
+### --header ###
+
+Add an HTTP header for all transactions. The flag can be repeated to
+add multiple headers.
+
+If you want to add headers only for uploads use `--header-upload` and
+if you want to add headers only for downloads use `--header-download`.
+
+This flag is supported for all HTTP based backends even those not
+supported by `--header-upload` and `--header-download` so may be used
+as a workaround for those with care.
+
+```
+rclone ls remote:test --header "X-Rclone: Foo" --header "X-LetMeIn: Yes"
+```
+
+### --header-download ###
+
+Add an HTTP header for all download transactions.
The flag can be repeated to +add multiple headers. + +``` +rclone sync -i s3:test/src ~/dst --header-download "X-Amz-Meta-Test: Foo" --header-download "X-Amz-Meta-Test2: Bar" +``` + +See the GitHub issue [here](https://github.com/rclone/rclone/issues/59) for +currently supported backends. + +### --header-upload ### + +Add an HTTP header for all upload transactions. The flag can be repeated to add +multiple headers. + +``` +rclone sync -i ~/src s3:test/dst --header-upload "Content-Disposition: attachment; filename='cool.html'" --header-upload "X-Amz-Meta-Test: FooBar" +``` + +See the GitHub issue [here](https://github.com/rclone/rclone/issues/59) for +currently supported backends. + +### --ignore-case-sync ### + +Using this option will cause rclone to ignore the case of the files +when synchronizing so files will not be copied/synced when the +existing filenames are the same, even if the casing is different. + +### --ignore-checksum ### + +Normally rclone will check that the checksums of transferred files +match, and give an error "corrupted on transfer" if they don't. + +You can use this option to skip that check. You should only use it if +you have had the "corrupted on transfer" error message and you are +sure you might want to transfer potentially corrupted data. + +### --ignore-existing ### + +Using this option will make rclone unconditionally skip all files +that exist on the destination, no matter the content of these files. + +While this isn't a generally recommended option, it can be useful +in cases where your files change due to encryption. However, it cannot +correct partial transfers in case a transfer was interrupted. + +### --ignore-size ### + +Normally rclone will look at modification time and size of files to +see if they are equal. If you set this flag then rclone will check +only the modification time. If `--checksum` is set then it only +checks the checksum. + +It will also cause rclone to skip verifying the sizes are the same +after transfer. + +This can be useful for transferring files to and from OneDrive which +occasionally misreports the size of image files (see +[#399](https://github.com/rclone/rclone/issues/399) for more info). + +### -I, --ignore-times ### + +Using this option will cause rclone to unconditionally upload all +files regardless of the state of files on the destination. + +Normally rclone would skip any files that have the same +modification time and are the same size (or have the same checksum if +using `--checksum`). + +### --immutable ### + +Treat source and destination files as immutable and disallow +modification. + +With this option set, files will be created and deleted as requested, +but existing files will never be updated. If an existing file does +not match between the source and destination, rclone will give the error +`Source and destination exist but do not match: immutable file modified`. + +Note that only commands which transfer files (e.g. `sync`, `copy`, +`move`) are affected by this behavior, and only modification is +disallowed. Files may still be deleted explicitly (e.g. `delete`, +`purge`) or implicitly (e.g. `sync`, `move`). Use `copy --immutable` +if it is desired to avoid deletion as well as modification. + +This can be useful as an additional layer of protection for immutable +or append-only data sets (notably backup archives), where modification +implies corruption and should not be propagated. 
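+
+As an illustration of the above (the paths here are just
+placeholders), an append-only backup copy might look like this:
+
+    rclone copy --immutable /path/to/backups remote:archive
+
+Any backup file that changed locally after being uploaded would be
+reported as an error rather than re-uploaded.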
+
+### -i / --interactive {#interactive}
+
+This flag can be used to tell rclone that you want a manual
+confirmation before destructive operations.
+
+It is **recommended** that you use this flag while learning rclone
+especially with `rclone sync`.
+
+For example
+
+```
+$ rclone delete -i /tmp/dir
+rclone: delete "important-file.txt"?
+y) Yes, this is OK (default)
+n) No, skip this
+s) Skip all delete operations with no more questions
+!) Do all delete operations with no more questions
+q) Exit rclone now.
+y/n/s/!/q> n
+```
+
+The options mean
+
+- `y`: **Yes**, this operation should go ahead. You can also press Return
+  for this to happen. You'll be asked every time unless you choose `s`
+  or `!`.
+- `n`: **No**, do not do this operation. You'll be asked every time unless
+  you choose `s` or `!`.
+- `s`: **Skip** all the following operations of this type with no more
+  questions. This takes effect until rclone exits. If there are any
+  different kinds of operations you'll be prompted for them.
+- `!`: **Do all** the following operations with no more
+  questions. Useful if you've decided that you don't mind rclone doing
+  that kind of operation. This takes effect until rclone exits. If
+  there are any different kinds of operations you'll be prompted for
+  them.
+- `q`: **Quit** rclone now, just in case!
+
+### --leave-root ###
+
+During rmdirs it will not remove the root directory, even if it's empty.
+
+### --log-file=FILE ###
+
+Log all of rclone's output to FILE. This is not active by default.
+This can be useful for tracking down problems with syncs in
+combination with the `-v` flag. See the [Logging section](#logging)
+for more info.
+
+If FILE exists then rclone will append to it.
+
+Note that if you are using the `logrotate` program to manage rclone's
+logs, then you should use the `copytruncate` option as rclone doesn't
+have a signal to rotate logs.
+
+### --log-format LIST ###
+
+Comma separated list of log format options. `date`, `time`, `microseconds`, `longfile`, `shortfile`, `UTC`. The default is "`date`,`time`".
+
+### --log-level LEVEL ###
+
+This sets the log level for rclone. The default log level is `NOTICE`.
+
+`DEBUG` is equivalent to `-vv`. It outputs lots of debug info - useful
+for bug reports and really finding out what rclone is doing.
+
+`INFO` is equivalent to `-v`. It outputs information about each transfer
+and prints stats once a minute by default.
+
+`NOTICE` is the default log level if no logging flags are supplied. It
+outputs very little when things are working normally. It outputs
+warnings and significant events.
+
+`ERROR` is equivalent to `-q`. It only outputs error messages.
+
+### --use-json-log ###
+
+This switches the log format to JSON for rclone. The fields of the JSON
+log are level, msg, source and time.
+
+### --low-level-retries NUMBER ###
+
+This controls the number of low level retries rclone does.
+
+A low level retry is used to retry a failing operation - typically one
+HTTP request. This might be uploading a chunk of a big file for
+example. You will see low level retries in the log with the `-v`
+flag.
+
+This shouldn't need to be changed from the default in normal operations.
+However, if you get a lot of low level retries you may wish
+to reduce the value so rclone moves on to a high level retry (see the
+`--retries` flag) quicker.
+
+Disable low level retries with `--low-level-retries 1`.
+
+### --max-backlog=N ###
+
+This is the maximum allowable backlog of files in a sync/copy/move
+queued for being checked or transferred.
+
+This can be set arbitrarily large. It will only use memory when the
+queue is in use. Note that it will use on the order of N kB of memory
+when the backlog is in use.
+
+Setting this large allows rclone to calculate how many files are
+pending more accurately, give a more accurate estimated finish
+time and make `--order-by` work more accurately.
+
+Setting this small will make rclone more synchronous to the listings
+of the remote which may be desirable.
+
+Setting this to a negative number will make the backlog as large as
+possible.
+
+### --max-delete=N ###
+
+This tells rclone not to delete more than N files. If that limit is
+exceeded then a fatal error will be generated and rclone will stop the
+operation in progress.
+
+### --max-depth=N ###
+
+This modifies the recursion depth for all the commands except purge.
+
+So if you do `rclone --max-depth 1 ls remote:path` you will see only
+the files in the top level directory. Using `--max-depth 2` means you
+will see all the files in first two directory levels and so on.
+
+For historical reasons the `lsd` command defaults to using a
+`--max-depth` of 1 - you can override this with the command line flag.
+
+You can use this flag to disable recursion (with `--max-depth 1`).
+
+Note that if you use this with `sync` and `--delete-excluded` the
+files not recursed through are considered excluded and will be deleted
+on the destination. Test first with `--dry-run` if you are not sure
+what will happen.
+
+### --max-duration=TIME ###
+
+Rclone will stop scheduling new transfers when it has run for the
+duration specified.
+
+Defaults to off.
+
+When the limit is reached any existing transfers will complete.
+
+Rclone won't exit with an error if the transfer limit is reached.
+
+### --max-transfer=SIZE ###
+
+Rclone will stop transferring when it has reached the size specified.
+Defaults to off.
+
+When the limit is reached all transfers will stop immediately.
+
+Rclone will exit with exit code 8 if the transfer limit is reached.
+
+### --cutoff-mode=hard|soft|cautious ###
+
+This modifies the behavior of `--max-transfer`.
+Defaults to `--cutoff-mode=hard`.
+
+Specifying `--cutoff-mode=hard` will stop transferring immediately
+when Rclone reaches the limit.
+
+Specifying `--cutoff-mode=soft` will stop starting new transfers
+when Rclone reaches the limit.
+
+Specifying `--cutoff-mode=cautious` will try to prevent Rclone
+from reaching the limit.
+
+### --modify-window=TIME ###
+
+When checking whether a file has been modified, this is the maximum
+allowed time difference that a file can have and still be considered
+equivalent.
+
+The default is `1ns` unless this is overridden by a remote. For
+example OS X only stores modification times to the nearest second so
+if you are reading and writing to an OS X filing system this will be
+`1s` by default.
+
+This command line flag allows you to override that computed default.
+
+### --multi-thread-cutoff=SIZE ###
+
+When downloading files to the local backend above this size, rclone
+will use multiple threads to download the file (default 250M).
+
+Rclone preallocates the file (using `fallocate(FALLOC_FL_KEEP_SIZE)`
+on unix or `NTSetInformationFile` on Windows both of which take no
+time) then each thread writes directly into the file at the correct
+place. This means that rclone won't create fragmented or sparse files
+and there won't be any assembly time at the end of the transfer.
+
+The number of threads used to download is controlled by
+`--multi-thread-streams`.
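+
+For example, to download large files from a remote to the local disk
+with up to 8 threads (the remote name and paths are placeholders):
+
+    rclone copy --multi-thread-streams 8 remote:big-files /path/to/local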
+
+Use `-vv` if you wish to see info about the threads.
+
+This will work with the `sync`/`copy`/`move` commands and friends
+`copyto`/`moveto`. Multi thread downloads will be used with `rclone
+mount` and `rclone serve` if `--vfs-cache-mode` is set to `writes` or
+above.
+
+**NB** that this **only** works for a local destination but will work
+with any source.
+
+**NB** that multi thread copies are disabled for local to local copies
+as they are faster without it unless `--multi-thread-streams` is set
+explicitly.
+
+**NB** on Windows using multi-thread downloads will cause the
+resulting files to be [sparse](https://en.wikipedia.org/wiki/Sparse_file).
+Use `--local-no-sparse` to disable sparse files (which may cause long
+delays at the start of downloads) or disable multi-thread downloads
+with `--multi-thread-streams 0`.
+
+### --multi-thread-streams=N ###
+
+When using multi thread downloads (see above `--multi-thread-cutoff`)
+this sets the maximum number of streams to use. Set to `0` to disable
+multi thread downloads (Default 4).
+
+Exactly how many streams rclone uses for the download depends on the
+size of the file. To calculate the number of download streams Rclone
+divides the size of the file by the `--multi-thread-cutoff` and rounds
+up, up to the maximum set with `--multi-thread-streams`.
+
+So if `--multi-thread-cutoff 250MB` and `--multi-thread-streams 4` are
+in effect (the defaults):
+
+- 0MB..250MB files will be downloaded with 1 stream
+- 250MB..500MB files will be downloaded with 2 streams
+- 500MB..750MB files will be downloaded with 3 streams
+- 750MB+ files will be downloaded with 4 streams
+
+### --no-check-dest ###
+
+The `--no-check-dest` flag can be used with `move` or `copy` and it causes
+rclone not to check the destination at all when copying files.
+
+This means that:
+
+- the destination is not listed minimising the API calls
+- files are always transferred
+- this can cause duplicates on remotes which allow it (e.g. Google Drive)
+- `--retries 1` is recommended otherwise you'll transfer everything again on a retry
+
+This flag is useful to minimise the transactions if you know that none
+of the files are on the destination.
+
+This is a specialized flag which should be ignored by most users!
+
+### --no-gzip-encoding ###
+
+Don't set `Accept-Encoding: gzip`. This means that rclone won't ask
+the server for compressed files automatically. Useful if you've set
+the server to return files with `Content-Encoding: gzip` but you
+uploaded compressed files.
+
+There is no need to set this in normal operation, and doing so will
+decrease the network transfer efficiency of rclone.
+
+### --no-traverse ###
+
+The `--no-traverse` flag controls whether the destination file system
+is traversed when using the `copy` or `move` commands.
+`--no-traverse` is not compatible with `sync` and will be ignored if
+you supply it with `sync`.
+
+If you are only copying a small number of files (or are filtering most
+of the files) and/or have a large number of files on the destination
+then `--no-traverse` will stop rclone listing the destination and save
+time.
+
+However, if you are copying a large number of files, especially if you
+are doing a copy where lots of the files under consideration haven't
+changed and won't need copying then you shouldn't use `--no-traverse`.
+
+See [rclone copy](/commands/rclone_copy/) for an example of how to use it.
+
+### --no-unicode-normalization ###
+
+Don't normalize unicode characters in filenames during the sync routine.
+
+Sometimes, an operating system will store filenames containing unicode
+parts in their decomposed form (particularly macOS). Some cloud storage
+systems will then recompose the unicode, resulting in duplicate files if
+the data is ever copied back to a local filesystem.
+
+Using this flag will disable that functionality, treating each unicode
+character as unique. For example, by default é and é will be normalized
+into the same character. With `--no-unicode-normalization` they will be
+treated as unique characters.
+
+### --no-update-modtime ###
+
+When using this flag, rclone won't update modification times of remote
+files if they are incorrect as it would normally.
+
+This can be used if the remote is being synced with another tool also
+(e.g. the Google Drive client).
+
+### --order-by string ###
+
+The `--order-by` flag controls the order in which files in the backlog
+are processed in `rclone sync`, `rclone copy` and `rclone move`.
+
+The order by string is constructed like this. The first part
+describes what aspect is being measured:
+
+- `size` - order by the size of the files
+- `name` - order by the full path of the files
+- `modtime` - order by the modification date of the files
+
+This can have a modifier appended with a comma:
+
+- `ascending` or `asc` - order so that the smallest (or oldest) is processed first
+- `descending` or `desc` - order so that the largest (or newest) is processed first
+- `mixed` - order so that the smallest is processed first for some threads and the largest for others
+
+If the modifier is `mixed` then it can have an optional percentage
+(which defaults to `50`), e.g. `size,mixed,25` which means that 25% of
+the threads should be taking the smallest items and 75% the
+largest. The threads which take the smallest first will always take
+the smallest first, and likewise for the threads taking the largest
+first. The `mixed` mode can be useful to minimise the transfer time
+when you are transferring a mixture of large and small files - the
+large files are guaranteed upload threads and bandwidth and the small
+files will be processed continuously.
+
+If no modifier is supplied then the order is `ascending`.
+
+For example
+
+- `--order-by size,desc` - send the largest files first
+- `--order-by modtime,ascending` - send the oldest files first
+- `--order-by name` - send the files alphabetically by path first
+
+If the `--order-by` flag is not supplied or it is supplied with an
+empty string then the default ordering will be used which is as
+scanned. With `--checkers 1` this is mostly alphabetical, however
+with the default `--checkers 8` it is somewhat random.
+
+#### Limitations
+
+The `--order-by` flag does not do a separate pass over the data. This
+means that it may transfer some files out of the order specified if
+
+- there are no files in the backlog or the source has not been fully scanned yet
+- there are more than [--max-backlog](#max-backlog-n) files in the backlog
+
+Rclone will do its best to transfer the best file it has so in
+practice this should not cause a problem. Think of `--order-by` as
+being more of a best efforts flag rather than a perfect ordering.
+
+If you want perfect ordering then you will need to specify
+[--check-first](#check-first) which will find all the files which need
+transferring first before transferring any.
+
+### --password-command SpaceSepList ###
+
+This flag supplies a program which should supply the config password
+when run.
This is an alternative to rclone prompting for the password
+or setting the `RCLONE_CONFIG_PASS` variable.
+
+The argument to this should be a command with a space separated list
+of arguments. If one of the arguments has a space in it then enclose it
+in `"`, if you want a literal `"` in an argument then enclose the
+argument in `"` and double the `"`. See [CSV encoding](https://godoc.org/encoding/csv)
+for more info.
+
+E.g.
+
+    --password-command echo hello
+    --password-command echo "hello with space"
+    --password-command echo "hello with ""quotes"" and space"
+
+See the [Configuration Encryption](#configuration-encryption) for more info.
+
+See a [Windows PowerShell example on the Wiki](https://github.com/rclone/rclone/wiki/Windows-Powershell-use-rclone-password-command-for-Config-file-password).
+
+### -P, --progress ###
+
+This flag makes rclone update the stats in a static block in the
+terminal providing a realtime overview of the transfer.
+
+Any log messages will scroll above the static block. Log messages
+will push the static block down to the bottom of the terminal where it
+will stay.
+
+Normally this is updated every 500ms but this period can be overridden
+with the `--stats` flag.
+
+This can be used with the `--stats-one-line` flag for a simpler
+display.
+
+Note: On Windows until [this bug](https://github.com/Azure/go-ansiterm/issues/26)
+is fixed all non-ASCII characters will be replaced with `.` when
+`--progress` is in use.
+
+### --progress-terminal-title ###
+
+This flag, when used with `-P/--progress`, will print the string `ETA: %s`
+to the terminal title.
+
+### -q, --quiet ###
+
+This flag will limit rclone's output to error messages only.
+
+### --refresh-times ###
+
+The `--refresh-times` flag can be used to update modification times of
+existing files when they are out of sync on backends which don't
+support hashes.
+
+This is useful if you uploaded files with the incorrect timestamps and
+you now wish to correct them.
+
+This flag is **only** useful for destinations which don't support
+hashes (e.g. `crypt`).
+
+This can be used with any of the sync commands `sync`, `copy` or `move`.
+
+To use this flag you will need to be doing a modification time sync
+(so not using `--size-only` or `--checksum`). The flag will have no
+effect when using `--size-only` or `--checksum`.
+
+If this flag is used when rclone comes to upload a file it will check
+to see if there is an existing file on the destination. If this file
+matches the source with size (and checksum if available) but has a
+differing timestamp then instead of re-uploading it, rclone will
+update the timestamp on the destination file. If the checksum does not
+match rclone will upload the new file. If the checksum is absent (e.g.
+on a `crypt` backend) then rclone will update the timestamp.
+
+Note that some remotes can't set the modification time without
+re-uploading the file so this flag is less useful on them.
+
+Normally if you are doing a modification time sync rclone will update
+modification times without `--refresh-times` provided that the remote
+supports checksums **and** the checksums match on the file. However if the
+checksums are absent then rclone will upload the file rather than
+setting the timestamp as this is the safe behaviour.
+
+### --retries int ###
+
+Retry the entire sync if it fails this many times (default 3).
+
+Some remotes can be unreliable and a few retries help pick up the
+files which didn't get transferred because of errors.
+
+Disable retries with `--retries 1`.
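+
+For example, a sync that retries up to 5 times might look like this
+(the paths are placeholders):
+
+    rclone sync -i /path/to/src remote:dst --retries 5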
+
+### --retries-sleep=TIME ###
+
+This sets the interval between each retry specified by `--retries`.
+
+The default is `0`, which disables the sleep between retries.
+
+### --size-only ###
+
+Normally rclone will look at modification time and size of files to
+see if they are equal. If you set this flag then rclone will check
+only the size.
+
+This can be useful when transferring files from Dropbox which have been
+modified by the desktop sync client which doesn't set checksums or
+modification times in the same way as rclone.
+
+### --stats=TIME ###
+
+Commands which transfer data (`sync`, `copy`, `copyto`, `move`,
+`moveto`) will print data transfer stats at regular intervals to show
+their progress.
+
+This sets the interval.
+
+The default is `1m`. Use `0` to disable.
+
+If you set the stats interval then all commands can show stats. This
+can be useful when running other commands, `check` or `mount` for
+example.
+
+Stats are logged at `INFO` level by default which means they won't
+show at default log level `NOTICE`. Use `--stats-log-level NOTICE` or
+`-v` to make them show. See the [Logging section](#logging) for more
+info on log levels.
+
+Note that on macOS you can send a SIGINFO (which is normally ctrl-T in
+the terminal) to make the stats print immediately.
+
+### --stats-file-name-length integer ###
+
+By default, the `--stats` output will truncate file names and paths longer
+than 40 characters. This is equivalent to providing
+`--stats-file-name-length 40`. Use `--stats-file-name-length 0` to disable
+any truncation of file names printed by stats.
+
+### --stats-log-level string ###
+
+Log level to show `--stats` output at. This can be `DEBUG`, `INFO`,
+`NOTICE`, or `ERROR`. The default is `INFO`. This means at the
+default level of logging which is `NOTICE` the stats won't show - if
+you want them to then use `--stats-log-level NOTICE`. See the [Logging
+section](#logging) for more info on log levels.
+
+### --stats-one-line ###
+
+When this is specified, rclone condenses the stats into a single line
+showing the most important stats only.
+
+### --stats-one-line-date ###
+
+When this is specified, rclone enables the single-line stats and prepends
+the display with a date string. The default is `2006/01/02 15:04:05 - `
+
+### --stats-one-line-date-format ###
+
+When this is specified, rclone enables the single-line stats and prepends
+the display with a user-supplied date string. The date string MUST be
+enclosed in quotes. Follow [golang specs](https://golang.org/pkg/time/#Time.Format) for
+date formatting syntax.
+
+### --stats-unit=bits|bytes ###
+
+By default, data transfer rates will be printed in bytes/second.
+
+This option allows the data rate to be printed in bits/second.
+
+Data transfer volume will still be reported in bytes.
+
+The rate is reported as a binary unit, not an SI unit. So 1 Mbit/s
+equals 1,048,576 bits/s and not 1,000,000 bits/s.
+
+The default is `bytes`.
+
+### --suffix=SUFFIX ###
+
+When using `sync`, `copy` or `move` any files which would have been
+overwritten or deleted will have the suffix added to them. If there
+is a file with the same path (after the suffix has been added), then
+it will be overwritten.
+
+The remote in use must support server-side move or copy and you must
+use the same remote as the destination of the sync.
+
+This is for use with files to add the suffix in the current directory
+or with `--backup-dir`. See `--backup-dir` for more info.
+
+For example
+
+    rclone copy -i /path/to/local/file remote:current --suffix .bak
+
+will copy `/path/to/local/file` to `remote:current`, but any files
+which would have been updated or deleted will have `.bak` added.
+
+If using `rclone sync` with `--suffix` and without `--backup-dir` then
+it is recommended to put a filter rule in excluding the suffix
+otherwise the `sync` will delete the backup files.
+
+    rclone sync -i /path/to/local/file remote:current --suffix .bak --exclude "*.bak"
+
+### --suffix-keep-extension ###
+
+When using `--suffix`, setting this causes rclone to put the SUFFIX
+before the extension of the files that it backs up rather than after.
+
+So let's say we had `--suffix -2019-01-01`, without the flag `file.txt`
+would be backed up to `file.txt-2019-01-01` and with the flag it would
+be backed up to `file-2019-01-01.txt`. This can be helpful to make
+sure the suffixed files can still be opened.
+
+### --syslog ###
+
+On capable OSes (not Windows or Plan9) send all log output to syslog.
+
+This can be useful for running rclone in a script or `rclone mount`.
+
+### --syslog-facility string ###
+
+If using `--syslog` this sets the syslog facility (e.g. `KERN`, `USER`).
+See `man syslog` for a list of possible facilities. The default
+facility is `DAEMON`.
+
+### --tpslimit float ###
+
+Limit transactions per second to this number. Default is 0 which is
+used to mean unlimited transactions per second.
+
+A transaction is roughly defined as an API call; its exact meaning
+will depend on the backend. For HTTP based backends it is an HTTP
+PUT/GET/POST/etc and its response. For FTP/SFTP it is a round trip
+transaction over TCP.
+
+For example to limit rclone to 10 transactions per second use
+`--tpslimit 10`, or to 1 transaction every 2 seconds use `--tpslimit
+0.5`.
+
+Use this when the number of transactions per second from rclone is
+causing a problem with the cloud storage provider (e.g. getting you
+banned or rate limited).
+
+This can be very useful for `rclone mount` to control the behaviour of
+applications using it.
+
+This limit applies to all HTTP based backends and to the FTP and SFTP
+backends. It does not apply to the local backend or the Tardigrade
+backend.
+
+See also `--tpslimit-burst`.
+
+### --tpslimit-burst int ###
+
+Max burst of transactions for `--tpslimit` (default `1`).
+
+Normally `--tpslimit` will do exactly the number of transactions per
+second specified. However if you supply `--tpslimit-burst` then rclone can
+save up some transactions from when it was idle giving a burst of up
+to the parameter supplied.
+
+For example if you provide `--tpslimit-burst 10` then if rclone has
+been idle for more than 10*`--tpslimit` then it can do 10 transactions
+very quickly before they are limited again.
+
+This may be used to increase performance of `--tpslimit` without
+changing the long term average number of transactions per second.
+
+### --track-renames ###
+
+By default, rclone doesn't keep track of renamed files, so if you
+rename a file locally then sync it to a remote, rclone will delete the
+old file on the remote and upload a new copy.
+
+If you use this flag, and the remote supports server-side copy or
+server-side move, and the source and destination have a compatible
+hash, then this will track renames during `sync`
+operations and perform renaming server-side.
+
+Files will be matched by size and hash - if both match then a rename
+will be considered.
+ +If the destination does not support server-side copy or move, rclone +will fall back to the default behaviour and log an error level message +to the console. + +Encrypted destinations are not currently supported by `--track-renames` +if `--track-renames-strategy` includes `hash`. + +Note that `--track-renames` is incompatible with `--no-traverse` and +that it uses extra memory to keep track of all the rename candidates. + +Note also that `--track-renames` is incompatible with +`--delete-before` and will select `--delete-after` instead of +`--delete-during`. + +### --track-renames-strategy (hash,modtime,leaf,size) ### + +This option changes the matching criteria for `--track-renames`. + +The matching is controlled by a comma separated selection of these tokens: + +- `modtime` - the modification time of the file - not supported on all backends +- `hash` - the hash of the file contents - not supported on all backends +- `leaf` - the name of the file not including its directory name +- `size` - the size of the file (this is always enabled) + +So using `--track-renames-strategy modtime,leaf` would match files +based on modification time, the leaf of the file name and the size +only. + +Using `--track-renames-strategy modtime` or `leaf` can enable +`--track-renames` support for encrypted destinations. + +If nothing is specified, the default option is matching by `hash`es. + +Note that the `hash` strategy is not supported with encrypted destinations. + +### --delete-(before,during,after) ### + +This option allows you to specify when files on your destination are +deleted when you sync folders. + +Specifying the value `--delete-before` will delete all files present +on the destination, but not on the source *before* starting the +transfer of any new or updated files. This uses two passes through the +file systems, one for the deletions and one for the copies. + +Specifying `--delete-during` will delete files while checking and +uploading files. This is the fastest option and uses the least memory. + +Specifying `--delete-after` (the default value) will delay deletion of +files until all new/updated files have been successfully transferred. +The files to be deleted are collected in the copy pass then deleted +after the copy pass has completed successfully. The files to be +deleted are held in memory so this mode may use more memory. This is +the safest mode as it will only delete files if there have been no +errors subsequent to that. If there have been errors before the +deletions start then you will get the message `not deleting files as +there were IO errors`. + +### --fast-list ### + +When doing anything which involves a directory listing (e.g. `sync`, +`copy`, `ls` - in fact nearly every command), rclone normally lists a +directory and processes it before using more directory lists to +process any subdirectories. This can be parallelised and works very +quickly using the least amount of memory. + +However, some remotes have a way of listing all files beneath a +directory in one (or a small number) of transactions. These tend to +be the bucket based remotes (e.g. S3, B2, GCS, Swift, Hubic). + +If you use the `--fast-list` flag then rclone will use this method for +listing directories. This will have the following consequences for +the listing: + + * It **will** use fewer transactions (important if you pay for them) + * It **will** use more memory. Rclone has to load the whole listing into memory. 
+ * It *may* be faster because it uses fewer transactions
+ * It *may* be slower because it can't be parallelized
+
+rclone should always give identical results with and without
+`--fast-list`.
+
+If you pay for transactions and can fit your entire sync listing into
+memory then `--fast-list` is recommended. If you have a very big sync
+to do then don't use `--fast-list` otherwise you will run out of
+memory.
+
+If you use `--fast-list` on a remote which doesn't support it, then
+rclone will just ignore it.
+
+### --timeout=TIME ###
+
+This sets the IO idle timeout. If a transfer has started but then
+becomes idle for this long it is considered broken and disconnected.
+
+The default is `5m`. Set to `0` to disable.
+
+### --transfers=N ###
+
+The number of file transfers to run in parallel. It can sometimes be
+useful to set this to a smaller number if the remote is giving a lot
+of timeouts or bigger if you have lots of bandwidth and a fast remote.
+
+The default is to run 4 file transfers in parallel.
+
+### -u, --update ###
+
+This forces rclone to skip any files which exist on the destination
+and have a modified time that is newer than the source file.
+
+This can be useful when transferring to a remote which doesn't support
+mod times directly (or when using `--use-server-modtime` to avoid extra
+API calls) as it is more accurate than a `--size-only` check and faster
+than using `--checksum`.
+
+If an existing destination file has a modification time equal (within
+the computed modify window precision) to the source file's, it will be
+updated if the sizes are different. If `--checksum` is set then
+rclone will update the destination if the checksums differ too.
+
+If an existing destination file is older than the source file then
+it will be updated if the size or checksum differs from the source file.
+
+On remotes which don't support mod time directly (or when using
+`--use-server-modtime`) the time checked will be the uploaded time.
+This means that if uploading to one of these remotes, rclone will skip
+any files which exist on the destination and have an uploaded time that
+is newer than the modification time of the source file.
+
+### --use-mmap ###
+
+If this flag is set then rclone will use anonymous memory allocated by
+mmap on Unix based platforms and VirtualAlloc on Windows for its
+transfer buffers (size controlled by `--buffer-size`). Memory
+allocated like this does not go on the Go heap and can be returned to
+the OS immediately when it is finished with.
+
+If this flag is not set then rclone will allocate and free the buffers
+using the Go memory allocator which may use more memory as memory
+pages are returned less aggressively to the OS.
+
+It is possible this does not work well on all platforms so it is
+disabled by default; in the future it may be enabled by default.
+
+### --use-server-modtime ###
+
+Some object-store backends (e.g. Swift, S3) do not preserve file modification
+times (modtime). On these backends, rclone stores the original modtime as
+additional metadata on the object. By default it will make an API call to
+retrieve the metadata when the modtime is needed by an operation.
+
+Use this flag to disable the extra API call and rely instead on the server's
+modified time. In cases such as a local to remote sync using `--update`,
+knowing the local file is newer than the time it was last uploaded to the
+remote is sufficient. In those cases, this flag can speed up the process and
+reduce the number of API calls necessary.
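+
+For example, a typical invocation for that case (the paths are
+placeholders) would be:
+
+    rclone sync --update --use-server-modtime /path/to/local s3:bucket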
+ +Using this flag on a sync operation without also using `--update` would cause +all files modified at any time other than the last upload time to be uploaded +again, which is probably not what you want. + +### -v, -vv, --verbose ### + +With `-v` rclone will tell you about each file that is transferred and +a small number of significant events. + +With `-vv` rclone will become very verbose telling you about every +file it considers and transfers. Please send bug reports with a log +with this setting. + +### -V, --version ### + +Prints the version number + +SSL/TLS options +--------------- + +The outgoing SSL/TLS connections rclone makes can be controlled with +these options. For example this can be very useful with the HTTP or +WebDAV backends. Rclone HTTP servers have their own set of +configuration for SSL/TLS which you can find in their documentation. + +### --ca-cert string + +This loads the PEM encoded certificate authority certificate and uses +it to verify the certificates of the servers rclone connects to. + +If you have generated certificates signed with a local CA then you +will need this flag to connect to servers using those certificates. + +### --client-cert string + +This loads the PEM encoded client side certificate. + +This is used for [mutual TLS authentication](https://en.wikipedia.org/wiki/Mutual_authentication). + +The `--client-key` flag is required too when using this. + +### --client-key string + +This loads the PEM encoded client side private key used for mutual TLS +authentication. Used in conjunction with `--client-cert`. + +### --no-check-certificate=true/false ### + +`--no-check-certificate` controls whether a client verifies the +server's certificate chain and host name. +If `--no-check-certificate` is true, TLS accepts any certificate +presented by the server and any host name in that certificate. +In this mode, TLS is susceptible to man-in-the-middle attacks. + +This option defaults to `false`. + +**This should be used only for testing.** + +Configuration Encryption +------------------------ +Your configuration file contains information for logging in to +your cloud services. This means that you should keep your +`rclone.conf` file in a secure location. + +If you are in an environment where that isn't possible, you can +add a password to your configuration. This means that you will +have to supply the password every time you start rclone. + +To add a password to your rclone configuration, execute `rclone config`. + +``` +>rclone config +Current remotes: + +e) Edit existing remote +n) New remote +d) Delete remote +s) Set configuration password +q) Quit config +e/n/d/s/q> +``` + +Go into `s`, Set configuration password: +``` +e/n/d/s/q> s +Your configuration is not encrypted. +If you add a password, you will protect your login information to cloud services. +a) Add Password +q) Quit to main menu +a/q> a +Enter NEW configuration password: +password: +Confirm NEW password: +password: +Password set +Your configuration is encrypted. +c) Change Password +u) Unencrypt configuration +q) Quit to main menu +c/u/q> +``` + +Your configuration is now encrypted, and every time you start rclone +you will have to supply the password. See below for details. +In the same menu, you can change the password or completely remove +encryption from your configuration. + +There is no way to recover the configuration if you lose your password. 
+
+rclone uses [nacl secretbox](https://godoc.org/golang.org/x/crypto/nacl/secretbox)
+which in turn uses XSalsa20 and Poly1305 to encrypt and authenticate
+your configuration with secret-key cryptography.
+The password is SHA-256 hashed, which produces the key for secretbox.
+The hashed password is not stored.
+
+While this provides very good security, we do not recommend storing
+your encrypted rclone configuration in public if it contains sensitive
+information, except perhaps if you use a very strong password.
+
+If it is safe in your environment, you can set the `RCLONE_CONFIG_PASS`
+environment variable to contain your password, in which case it will be
+used for decrypting the configuration.
+
+You can set this for a session from a script. For Unix-like systems
+save this to a file called `set-rclone-password`:
+
+```
+#!/bin/echo Source this file don't run it
+
+read -s RCLONE_CONFIG_PASS
+export RCLONE_CONFIG_PASS
+```
+
+Then source the file when you want to use it. From the shell you
+would do `source set-rclone-password`. It will then ask you for the
+password and set it in the environment variable.
+
+An alternate means of supplying the password is to provide a script
+which will retrieve the password and print it on standard output. This
+script should have a fully specified path name and not rely on any
+environment variables. The script is supplied either via the
+`--password-command="..."` command line argument or via the
+`RCLONE_PASSWORD_COMMAND` environment variable.
+
+One useful example of this is using the `passwordstore` application
+to retrieve the password:
+
+```
+export RCLONE_PASSWORD_COMMAND="pass rclone/config"
+```
+
+If the `passwordstore` password manager holds the password for the
+rclone configuration, using the script method means the password
+is primarily protected by the `passwordstore` system, and is never
+embedded in the clear in scripts, nor available for examination
+using standard commands. It is quite possible with
+long running rclone sessions for copies of passwords to be innocently
+captured in log files or terminal scroll buffers, etc. Using the
+script method of supplying the password enhances the security of
+the config password considerably.
+
+If you are running rclone inside a script, unless you are using the
+`--password-command` method, you might want to disable
+password prompts. To do that, pass the parameter
+`--ask-password=false` to rclone. This will make rclone fail instead
+of asking for a password if `RCLONE_CONFIG_PASS` doesn't contain
+a valid password, and `--password-command` has not been supplied.
+
+Some rclone commands, such as `genautocomplete`, do not require configuration.
+Nevertheless, rclone will read any configuration file found
+according to the rules described [above](https://rclone.org/docs/#config-config-file).
+If an encrypted configuration file is found, this means you will be prompted for
+the password (unless using `--password-command`). To avoid this, you can bypass
+the loading of the configuration file by overriding the location with an empty
+string `""` or the special value `/notfound`, or the OS null device represented
+by value `NUL` on Windows and `/dev/null` on Unix systems (before rclone
+version 1.55 only this null device alternative was supported).
+E.g. `rclone --config="" genautocomplete bash`.
+
+Developer options
+-----------------
+
+These options are useful when developing or debugging rclone.
There
+are also some more remote specific options which aren't documented
+here which are used for testing. These start with the remote name, e.g.
+`--drive-test-option` - see the docs for the remote in question.
+
+### --cpuprofile=FILE ###
+
+Write CPU profile to file. This can be analysed with `go tool pprof`.
+
+### --dump flag,flag,flag ###
+
+The `--dump` flag takes a comma separated list of flags to dump info
+about.
+
+Note that some headers including `Accept-Encoding` as shown may not
+be correct in the request, and the response may not show `Content-Encoding`
+if the Go standard library's automatic gzip encoding was in effect. In this case
+the body of the request will be gunzipped before it is shown.
+
+The available flags are:
+
+#### --dump headers ####
+
+Dump HTTP headers with `Authorization:` lines removed. May still
+contain sensitive info. Can be very verbose. Useful for debugging
+only.
+
+Use `--dump auth` if you do want the `Authorization:` headers.
+
+#### --dump bodies ####
+
+Dump HTTP headers and bodies - may contain sensitive info. Can be
+very verbose. Useful for debugging only.
+
+Note that the bodies are buffered in memory so don't use this for
+enormous files.
+
+#### --dump requests ####
+
+Like `--dump bodies` but dumps the request bodies and the response
+headers. Useful for debugging download problems.
+
+#### --dump responses ####
+
+Like `--dump bodies` but dumps the response bodies and the request
+headers. Useful for debugging upload problems.
+
+#### --dump auth ####
+
+Dump HTTP headers - will contain sensitive info such as
+`Authorization:` headers - use `--dump headers` to dump without
+`Authorization:` headers. Can be very verbose. Useful for debugging
+only.
+
+#### --dump filters ####
+
+Dump the filters to the output. Useful to see exactly what include
+and exclude options are filtering on.
+
+#### --dump goroutines ####
+
+This dumps a list of the running goroutines at the end of the command
+to standard output.
+
+#### --dump openfiles ####
+
+This dumps a list of the open files at the end of the command. It
+uses the `lsof` command to do that, so you'll need `lsof` installed
+to use it.
+
+### --memprofile=FILE ###
+
+Write memory profile to file. This can be analysed with `go tool pprof`.
+
+Filtering
+---------
+
+For the filtering options
+
+ * `--delete-excluded`
+ * `--filter`
+ * `--filter-from`
+ * `--exclude`
+ * `--exclude-from`
+ * `--include`
+ * `--include-from`
+ * `--files-from`
+ * `--files-from-raw`
+ * `--min-size`
+ * `--max-size`
+ * `--min-age`
+ * `--max-age`
+ * `--dump filters`
+
+See the [filtering section](/filtering/).
+
+Remote control
+--------------
+
+For the remote control options and for instructions on how to remote control rclone
+
+ * `--rc`
+ * and anything starting with `--rc-`
+
+See [the remote control section](/rc/).
+
+Logging
+-------
+
+rclone has 4 levels of logging, `ERROR`, `NOTICE`, `INFO` and `DEBUG`.
+
+By default, rclone logs to standard error. This means you can redirect
+standard error and still see the normal output of rclone commands (e.g.
+`rclone ls`).
+
+By default, rclone will produce `Error` and `Notice` level messages.
+
+If you use the `-q` flag, rclone will only produce `Error` messages.
+
+If you use the `-v` flag, rclone will produce `Error`, `Notice` and
+`Info` messages.
+
+If you use the `-vv` flag, rclone will produce `Error`, `Notice`,
+`Info` and `Debug` messages.
+
+You can also control the log levels with the `--log-level` flag. 
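+
+For example, these two invocations (the paths are placeholders) are
+equivalent ways of getting `Debug` level output:
+
+```
+rclone copy -vv source:path dest:path
+rclone copy --log-level DEBUG source:path dest:path
+```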
+
+If you use the `--log-file=FILE` option, rclone will redirect `Error`,
+`Info` and `Debug` messages along with standard error to FILE.
+
+If you use the `--syslog` flag then rclone will log to syslog and the
+`--syslog-facility` flag controls which facility it uses.
+
+Rclone prefixes all log messages with their level in capitals, e.g. INFO,
+which makes it easy to grep the log file for different kinds of
+information.
+
+Exit Code
+---------
+
+If any errors occur during the command execution, rclone will exit with a
+non-zero exit code. This allows scripts to detect when rclone
+operations have failed.
+
+During the startup phase, rclone will exit immediately if an error is
+detected in the configuration. There will always be a log message
+immediately before exiting.
+
+When rclone is running it will accumulate errors as it goes along, and
+only exit with a non-zero exit code if (after retries) there were
+still failed transfers. For every error counted there will be a high
+priority log message (visible with `-q`) showing the message and
+which file caused the problem. A high priority message is also shown
+when starting a retry so the user can see that any previous error
+messages may not be valid after the retry. If rclone has done a retry
+it will log a high priority message if the retry was successful.
+
+### List of exit codes ###
+ * `0` - success
+ * `1` - Syntax or usage error
+ * `2` - Error not otherwise categorised
+ * `3` - Directory not found
+ * `4` - File not found
+ * `5` - Temporary error (one that more retries might fix) (Retry errors)
+ * `6` - Less serious errors (like 461 errors from dropbox) (NoRetry errors)
+ * `7` - Fatal error (one that more retries won't fix, like account suspended) (Fatal errors)
+ * `8` - Transfer exceeded - limit set by `--max-transfer` reached
+ * `9` - Operation successful, but no files transferred
+
+Environment Variables
+---------------------
+
+Rclone can be configured entirely using environment variables. These
+can be used to set defaults for options or config file entries.
+
+### Options ###
+
+Every option in rclone can have its default set by environment
+variable.
+
+To find the name of the environment variable, first take the long
+option name, strip the leading `--`, change `-` to `_`, make it
+upper case and prepend `RCLONE_`.
+
+For example, to always set `--stats 5s`, set the environment variable
+`RCLONE_STATS=5s`. If you set stats on the command line this will
+override the environment variable setting.
+
+Or, to always use the trash in drive (`--drive-use-trash`), set
+`RCLONE_DRIVE_USE_TRASH=true`.
+
+The same parser is used for the options and the environment variables
+so they take exactly the same form.
+
+### Config file ###
+
+You can set defaults for values in the config file on an individual
+remote basis. The names of the config items are documented in the page
+for each backend.
+
+To find the name of the environment variable you need to set, take
+`RCLONE_CONFIG_` + name of remote + `_` + name of config file option
+and make it all uppercase. 
+
+For example, to configure an S3 remote named `mys3:` without a config
+file (using the Unix way of setting environment variables):
+
+```
+$ export RCLONE_CONFIG_MYS3_TYPE=s3
+$ export RCLONE_CONFIG_MYS3_ACCESS_KEY_ID=XXX
+$ export RCLONE_CONFIG_MYS3_SECRET_ACCESS_KEY=XXX
+$ rclone lsd MYS3:
+          -1 2016-09-21 12:54:21        -1 my-bucket
+$ rclone listremotes | grep mys3
+mys3:
+```
+
+Note that if you want to create a remote using environment variables
+you must create the `..._TYPE` variable as above.
+
+Note also that now rclone has [connection strings](#connection-strings),
+it is probably easier to use those instead, which makes the above example:
+
+    rclone lsd :s3,access_key_id=XXX,secret_access_key=XXX:
+
+### Precedence ###
+
+The various methods of backend configuration are read in
+this order, and the first one with a value is used.
+
+- Flag values as supplied on the command line, e.g. `--drive-use-trash`.
+- Remote specific environment vars, e.g. `RCLONE_CONFIG_MYREMOTE_USE_TRASH` (see above).
+- Backend specific environment vars, e.g. `RCLONE_DRIVE_USE_TRASH`.
+- Config file, e.g. `use_trash = false`.
+- Default values, e.g. `true` - these can't be changed.
+
+So if `--drive-use-trash` is supplied on the command line and the
+environment variable `RCLONE_DRIVE_USE_TRASH` is set, the command line
+flag will take precedence.
+
+For non-backend configuration the order is as follows:
+
+- Flag values as supplied on the command line, e.g. `--stats 5s`.
+- Environment vars, e.g. `RCLONE_STATS=5s`.
+- Default values, e.g. `1m` - these can't be changed.
+
+### Other environment variables ###
+
+- `RCLONE_CONFIG_PASS` set to contain your config file password (see the [Configuration Encryption](#configuration-encryption) section)
+- `HTTP_PROXY`, `HTTPS_PROXY` and `NO_PROXY` (or the lowercase versions thereof).
+    - `HTTPS_PROXY` takes precedence over `HTTP_PROXY` for https requests.
+    - The environment values may be either a complete URL or a "host[:port]", in which case the "http" scheme is assumed.
+- `RCLONE_CONFIG_DIR` - rclone **sets** this variable for use in config files and sub processes to point to the directory holding the config file.
diff --git a/docs/content/drive.md b/docs/content/drive.md
index c2c434840..1082d0ed0 100644
--- a/docs/content/drive.md
+++ b/docs/content/drive.md
@@ -993,7 +993,7 @@ See: https://github.com/rclone/rclone/issues/3631
 
 Make upload limit errors be fatal
 
-At the time of writing it is only possible to upload 750GB of data to
+At the time of writing it is only possible to upload 750 GiB of data to
 Google Drive a day (this is an undocumented limit). When this limit is
 reached Google Drive produces a slightly different error message. When
 this flag is set it causes these errors to be fatal. These will stop
@@ -1014,7 +1014,7 @@ See: https://github.com/rclone/rclone/issues/3857
 
 Make download limit errors be fatal
 
-At the time of writing it is only possible to download 10TB of data from
+At the time of writing it is only possible to download 10 TiB of data from
 Google Drive a day (this is an undocumented limit). When this limit is
 reached Google Drive produces a slightly different error message. When
 this flag is set it causes these errors to be fatal. These will stop
@@ -1226,7 +1226,7 @@ Use the -i flag to see what would be copied before copying.
 
 Drive has quite a lot of rate limiting. This causes rclone to be
 limited to transferring about 2 files per second only. 
Individual -files may be transferred much faster at 100s of MByte/s but lots of +files may be transferred much faster at 100s of MiByte/s but lots of small files can take a long time. Server side copies are also subject to a separate rate limit. If you diff --git a/docs/content/dropbox.md b/docs/content/dropbox.md index ae91c1cb2..6173e9214 100644 --- a/docs/content/dropbox.md +++ b/docs/content/dropbox.md @@ -185,7 +185,7 @@ Any files larger than this will be uploaded in chunks of this size. Note that chunks are buffered in memory (one at a time) so rclone can deal with retries. Setting this larger will increase the speed -slightly (at most 10% for 128MB in tests) at the cost of using more +slightly (at most 10% for 128 MiB in tests) at the cost of using more memory. It can be set smaller if you are tight on memory. - Config: chunk_size diff --git a/docs/content/filtering.md b/docs/content/filtering.md index 1119c340a..0e1ccae04 100644 --- a/docs/content/filtering.md +++ b/docs/content/filtering.md @@ -586,17 +586,17 @@ remote or flag value. The fix then is to quote values containing spaces. ### `--min-size` - Don't transfer any file smaller than this Controls the minimum size file within the scope of an rclone command. -Default units are `kBytes` but abbreviations `k`, `M`, or `G` are valid. +Default units are `KiByte` but abbreviations `K`, `M`, `G`, `T` or `P` are valid. -E.g. `rclone ls remote: --min-size 50k` lists files on `remote:` of 50 kByte +E.g. `rclone ls remote: --min-size 50k` lists files on `remote:` of 50 KiByte size or larger. ### `--max-size` - Don't transfer any file larger than this Controls the maximum size file within the scope of an rclone command. -Default units are `kBytes` but abbreviations `k`, `M`, or `G` are valid. +Default units are `KiByte` but abbreviations `K`, `M`, `G`, `T` or `P` are valid. -E.g. `rclone ls remote: --max-size 1G` lists files on `remote:` of 1 GByte +E.g. `rclone ls remote: --max-size 1G` lists files on `remote:` of 1 GiByte size or smaller. ### `--max-age` - Don't transfer any file older than this @@ -650,8 +650,8 @@ E.g. the scope of `rclone sync -i A: B:` can be restricted: rclone --min-size 50k --delete-excluded sync A: B: -All files on `B:` which are less than 50 kBytes are deleted -because they are excluded from the rclone sync command. +All files on `B:` which are less than 50 KiByte are deleted +because they are excluded from the rclone sync command. ### `--dump filters` - dump the filters to the output diff --git a/docs/content/flags.md b/docs/content/flags.md index aeb20483e..899323f3d 100755 --- a/docs/content/flags.md +++ b/docs/content/flags.md @@ -18,8 +18,8 @@ These flags are available for every command. --backup-dir string Make backups into hierarchy based in DIR. --bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name. --buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M) - --bwlimit BwTimetable Bandwidth limit in kByte/s, or use suffix b|k|M|G or a full timetable. - --bwlimit-file BwTimetable Bandwidth limit per file in kByte/s, or use suffix b|k|M|G or a full timetable. + --bwlimit BwTimetable Bandwidth limit in KiByte/s, or use suffix B|K|M|G|T|P or a full timetable. + --bwlimit-file BwTimetable Bandwidth limit per file in KiByte/s, or use suffix B|K|M|G|T|P or a full timetable. --ca-cert string CA certificate used to verify servers --cache-dir string Directory rclone will use for caching. 
(default "$HOME/.cache/rclone") --check-first Do all the checks before starting transfers. @@ -79,12 +79,12 @@ These flags are available for every command. --max-delete int When synchronizing, limit the number of deletes (default -1) --max-depth int If set limits the recursion depth to this. (default -1) --max-duration duration Maximum duration rclone will transfer data for. - --max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off) + --max-size SizeSuffix Only transfer files smaller than this in KiB or suffix B|K|M|G|T|P (default off) --max-stats-groups int Maximum number of stats groups to keep in memory. On max oldest is discarded. (default 1000) --max-transfer SizeSuffix Maximum size of data to transfer. (default off) --memprofile string Write memory profile to file --min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off) - --min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off) + --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) --modify-window duration Max time diff to be considered the same (default 1ns) --multi-thread-cutoff SizeSuffix Use multi-thread downloads for files above this size. (default 250M) --multi-thread-streams int Max number of streams to use for multi-thread downloads. (default 4) @@ -170,12 +170,12 @@ and may be set in the config file. --acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G) --acd-token string OAuth Access Token as a JSON blob. --acd-token-url string Token server url. - --acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s) + --acd-upload-wait-per-gb Duration Additional time per GiB to wait after a failed complete upload to see if it appears. (default 3m0s) --alias-remote string Remote or path to alias. --azureblob-access-tier string Access tier of blob: hot, cool or archive. --azureblob-account string Storage Account Name (leave blank to use SAS URL or Emulator) --azureblob-archive-tier-delete Delete archive tier blobs before overwriting. - --azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M) + --azureblob-chunk-size SizeSuffix Upload chunk size (<= 100 MiB). (default 4M) --azureblob-disable-checksum Don't store MD5 checksum with object metadata. --azureblob-encoding MultiEncoder This sets the encoding for the backend. (default Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8) --azureblob-endpoint string Endpoint for the service @@ -189,7 +189,7 @@ and may be set in the config file. --azureblob-public-access string Public access level of a container: blob, container. --azureblob-sas-url string SAS URL for container level access only --azureblob-service-principal-file string Path to file containing credentials for use with a service principal. - --azureblob-upload-cutoff string Cutoff for switching to chunked upload (<= 256MB). (Deprecated) + --azureblob-upload-cutoff string Cutoff for switching to chunked upload (<= 256 MiB). (Deprecated) --azureblob-use-emulator Uses local storage emulator if provided as 'true' (leave blank if using real azure storage endpoint) --azureblob-use-msi Use a managed service identity to authenticate (only works in Azure) --b2-account string Account ID or Application Key ID @@ -218,7 +218,7 @@ and may be set in the config file. 
--box-root-folder-id string Fill in for rclone to use a non root folder as its starting point. --box-token string OAuth Access Token as a JSON blob. --box-token-url string Token server url. - --box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M) + --box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50 MiB). (default 50M) --cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s) --cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming. --cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend") diff --git a/docs/content/hubic.md b/docs/content/hubic.md index 64e93867b..e50ab9103 100644 --- a/docs/content/hubic.md +++ b/docs/content/hubic.md @@ -168,7 +168,7 @@ Leave blank to use the provider defaults. Above this size files will be chunked into a _segments container. Above this size files will be chunked into a _segments container. The -default for this is 5GB which is its maximum value. +default for this is 5 GiB which is its maximum value. - Config: chunk_size - Env Var: RCLONE_HUBIC_CHUNK_SIZE @@ -182,7 +182,7 @@ Don't chunk files during streaming upload. When doing streaming uploads (e.g. using rcat or mount) setting this flag will cause the swift backend to not upload chunked files. -This will limit the maximum upload size to 5GB. However non chunked +This will limit the maximum upload size to 5 GiB. However non chunked files are easier to deal with and have an MD5SUM. Rclone will still chunk files bigger than chunk_size when doing normal diff --git a/docs/content/onedrive.md b/docs/content/onedrive.md index 60e31ea4d..0edc3cf8e 100644 --- a/docs/content/onedrive.md +++ b/docs/content/onedrive.md @@ -430,7 +430,7 @@ in it will be mapped to `?` instead. #### File sizes #### -The largest allowed file size is 250GB for both OneDrive Personal and OneDrive for Business [(Updated 13 Jan 2021)](https://support.microsoft.com/en-us/office/invalid-file-names-and-file-types-in-onedrive-and-sharepoint-64883a5d-228e-48f5-b3d2-eb39e07630fa?ui=en-us&rs=en-us&ad=us#individualfilesize). +The largest allowed file size is 250 GiB for both OneDrive Personal and OneDrive for Business [(Updated 13 Jan 2021)](https://support.microsoft.com/en-us/office/invalid-file-names-and-file-types-in-onedrive-and-sharepoint-64883a5d-228e-48f5-b3d2-eb39e07630fa?ui=en-us&rs=en-us&ad=us#individualfilesize). #### Path length #### diff --git a/docs/content/overview.md b/docs/content/overview.md index ba9bca566..88d8e916f 100644 --- a/docs/content/overview.md +++ b/docs/content/overview.md @@ -58,7 +58,7 @@ Here is an overview of the major features of each cloud storage system. ¹ Dropbox supports [its own custom hash](https://www.dropbox.com/developers/reference/content-hash). -This is an SHA256 sum of all the 4MB block SHA256s. +This is an SHA256 sum of all the 4 MiB block SHA256s. ² SFTP supports checksums if the same login has shell access and `md5sum` or `sha1sum` as well as `echo` are in the remote's PATH. diff --git a/docs/content/qingstor.md b/docs/content/qingstor.md index b1bb4b42e..4771125a4 100644 --- a/docs/content/qingstor.md +++ b/docs/content/qingstor.md @@ -101,7 +101,7 @@ docs](/docs/#fast-list) for more details. ### Multipart uploads ### rclone supports multipart uploads with QingStor which means that it can -upload files bigger than 5GB. Note that files uploaded with multipart +upload files bigger than 5 GiB. 
Note that files uploaded with multipart upload don't have an MD5SUM. Note that incomplete multipart uploads older than 24 hours can be @@ -227,7 +227,7 @@ Number of connection retries. Cutoff for switching to chunked upload Any files larger than this will be uploaded in chunks of chunk_size. -The minimum is 0 and the maximum is 5GB. +The minimum is 0 and the maximum is 5 GiB. - Config: upload_cutoff - Env Var: RCLONE_QINGSTOR_UPLOAD_CUTOFF diff --git a/docs/content/s3.md b/docs/content/s3.md index f75ec58cb..b8d2a1781 100644 --- a/docs/content/s3.md +++ b/docs/content/s3.md @@ -327,7 +327,7 @@ objects). See the [rclone docs](/docs/#fast-list) for more details. `--fast-list` trades off API transactions for memory use. As a rough guide rclone uses 1k of memory per object stored, so using -`--fast-list` on a sync of a million objects will use roughly 1 GB of +`--fast-list` on a sync of a million objects will use roughly 1 GiB of RAM. If you are only copying a small number of files into a big repository @@ -407,13 +407,13 @@ work with the SDK properly: ### Multipart uploads ### rclone supports multipart uploads with S3 which means that it can -upload files bigger than 5GB. +upload files bigger than 5 GiB. Note that files uploaded *both* with multipart upload *and* through crypt remotes do not have MD5 sums. rclone switches from single part uploads to multipart uploads at the -point specified by `--s3-upload-cutoff`. This can be a maximum of 5GB +point specified by `--s3-upload-cutoff`. This can be a maximum of 5 GiB and a minimum of 0 (ie always upload multipart files). The chunk sizes used in the multipart upload are specified by @@ -1412,7 +1412,7 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke Cutoff for switching to chunked upload Any files larger than this will be uploaded in chunks of chunk_size. -The minimum is 0 and the maximum is 5GB. +The minimum is 0 and the maximum is 5 GiB. - Config: upload_cutoff - Env Var: RCLONE_S3_UPLOAD_CUTOFF @@ -1438,9 +1438,9 @@ Rclone will automatically increase the chunk size when uploading a large file of known size to stay below the 10,000 chunks limit. Files of unknown size are uploaded with the configured -chunk_size. Since the default chunk size is 5MB and there can be at +chunk_size. Since the default chunk size is 5 MiB and there can be at most 10,000 chunks, this means that by default the maximum size of -a file you can stream upload is 48GB. If you wish to stream upload +a file you can stream upload is 48 GiB. If you wish to stream upload larger files then you will need to increase chunk_size. - Config: chunk_size @@ -1474,7 +1474,7 @@ Cutoff for switching to multipart copy Any files larger than this that need to be server-side copied will be copied in chunks of this size. -The minimum is 0 and the maximum is 5GB. +The minimum is 0 and the maximum is 5 GiB. - Config: copy_cutoff - Env Var: RCLONE_S3_COPY_CUTOFF diff --git a/docs/content/sharefile.md b/docs/content/sharefile.md index cdf499be5..baffa1944 100644 --- a/docs/content/sharefile.md +++ b/docs/content/sharefile.md @@ -112,10 +112,10 @@ flag. ### Transfers ### -For files above 128MB rclone will use a chunked transfer. Rclone will +For files above 128 MiB rclone will use a chunked transfer. Rclone will upload up to `--transfers` chunks at the same time (shared among all the multipart uploads). Chunks are buffered in memory and are -normally 64MB so increasing `--transfers` will increase memory use. 
+normally 64 MiB so increasing `--transfers` will increase memory use. ### Limitations ### diff --git a/docs/content/swift.md b/docs/content/swift.md index 7e62f3ecd..eb7bd264a 100644 --- a/docs/content/swift.md +++ b/docs/content/swift.md @@ -444,7 +444,7 @@ If true avoid calling abort upload on a failure. It should be set to true for re Above this size files will be chunked into a _segments container. Above this size files will be chunked into a _segments container. The -default for this is 5GB which is its maximum value. +default for this is 5 GiB which is its maximum value. - Config: chunk_size - Env Var: RCLONE_SWIFT_CHUNK_SIZE @@ -458,7 +458,7 @@ Don't chunk files during streaming upload. When doing streaming uploads (e.g. using rcat or mount) setting this flag will cause the swift backend to not upload chunked files. -This will limit the maximum upload size to 5GB. However non chunked +This will limit the maximum upload size to 5 GiB. However non chunked files are easier to deal with and have an MD5SUM. Rclone will still chunk files bigger than chunk_size when doing normal diff --git a/docs/content/yandex.md b/docs/content/yandex.md index 4538523bd..93ce64634 100644 --- a/docs/content/yandex.md +++ b/docs/content/yandex.md @@ -114,15 +114,15 @@ as they can't be used in JSON strings. ### Limitations ### -When uploading very large files (bigger than about 5GB) you will need +When uploading very large files (bigger than about 5 GiB) you will need to increase the `--timeout` parameter. This is because Yandex pauses (perhaps to calculate the MD5SUM for the entire file) before returning confirmation that the file has been uploaded. The default handling of timeouts in rclone is to assume a 5 minute pause is an error and close the connection - you'll see `net/http: timeout awaiting response headers` errors in the logs if this is happening. Setting the timeout -to twice the max size of file in GB should be enough, so if you want -to upload a 30GB file set a timeout of `2 * 30 = 60m`, that is +to twice the max size of file in GiB should be enough, so if you want +to upload a 30 GiB file set a timeout of `2 * 30 = 60m`, that is `--timeout 60m`. 
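+
+For example, a hypothetical 30 GiB upload (the paths are placeholders):
+
+    rclone copy --timeout 60m /path/to/30GiB-file yandex:backup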
{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/yandex/yandex.go then run make backenddocs" >}} diff --git a/fs/accounting/stats.go b/fs/accounting/stats.go index b3cd784d0..bb047ae84 100644 --- a/fs/accounting/stats.go +++ b/fs/accounting/stats.go @@ -300,15 +300,13 @@ func (s *StatsInfo) String() string { dateString = "" elapsedTime = time.Since(startTime) elapsedTimeSecondsOnly = elapsedTime.Truncate(time.Second/10) % time.Minute - displaySpeed = ts.speed - displaySpeedUnit string + displaySpeedString string ) if s.ci.DataRateUnit == "bits" { - displaySpeed *= 8 - displaySpeedUnit = "bit/s" + displaySpeedString = fs.SizeSuffix(ts.speed * 8).BitRateUnit() } else { - displaySpeedUnit = "Byte/s" + displaySpeedString = fs.SizeSuffix(ts.speed).ByteRateUnit() } if !s.ci.StatsOneLine { @@ -330,12 +328,12 @@ func (s *StatsInfo) String() string { } } - _, _ = fmt.Fprintf(buf, "%s%10s / %s, %s, %s, ETA %s%s", + _, _ = fmt.Fprintf(buf, "%s%11s / %s, %s, %s, ETA %s%s", dateString, fs.SizeSuffix(s.bytes), - fs.SizeSuffix(ts.totalBytes).Unit("Byte"), + fs.SizeSuffix(ts.totalBytes).ByteUnit(), percent(s.bytes, ts.totalBytes), - fs.SizeSuffix(displaySpeed).Unit(displaySpeedUnit), + displaySpeedString, etaString(s.bytes, ts.totalBytes, ts.speed), xfrchkString, ) diff --git a/fs/accounting/token_bucket_test.go b/fs/accounting/token_bucket_test.go index 4210bc188..2c3e3bcce 100644 --- a/fs/accounting/token_bucket_test.go +++ b/fs/accounting/token_bucket_test.go @@ -24,7 +24,7 @@ func TestRcBwLimit(t *testing.T) { "bytesPerSecond": int64(1048576), "bytesPerSecondTx": int64(1048576), "bytesPerSecondRx": int64(1048576), - "rate": "1M", + "rate": "1Mi", }, out) assert.Equal(t, rate.Limit(1048576), TokenBucket.curr[0].Limit()) @@ -36,7 +36,7 @@ func TestRcBwLimit(t *testing.T) { "bytesPerSecond": int64(1048576), "bytesPerSecondTx": int64(1048576), "bytesPerSecondRx": int64(1048576), - "rate": "1M", + "rate": "1Mi", }, out) // Set @@ -49,7 +49,7 @@ func TestRcBwLimit(t *testing.T) { "bytesPerSecond": int64(10485760), "bytesPerSecondTx": int64(10485760), "bytesPerSecondRx": int64(1048576), - "rate": "10M:1M", + "rate": "10Mi:1Mi", }, out) assert.Equal(t, rate.Limit(10485760), TokenBucket.curr[0].Limit()) @@ -61,7 +61,7 @@ func TestRcBwLimit(t *testing.T) { "bytesPerSecond": int64(10485760), "bytesPerSecondTx": int64(10485760), "bytesPerSecondRx": int64(1048576), - "rate": "10M:1M", + "rate": "10Mi:1Mi", }, out) // Reset diff --git a/fs/bwtimetable_test.go b/fs/bwtimetable_test.go index b4d6fb829..2e3f677f6 100644 --- a/fs/bwtimetable_test.go +++ b/fs/bwtimetable_test.go @@ -43,7 +43,7 @@ func TestBwTimetableSet(t *testing.T) { BwTimeSlot{DayOfTheWeek: 0, HHMM: 0, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}}, }, false, - "666k", + "666Ki", }, { "666:333", @@ -51,7 +51,7 @@ func TestBwTimetableSet(t *testing.T) { BwTimeSlot{DayOfTheWeek: 0, HHMM: 0, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 333 * 1024}}, }, false, - "666k:333k", + "666Ki:333Ki", }, { "10:20,666", @@ -65,7 +65,7 @@ func TestBwTimetableSet(t *testing.T) { BwTimeSlot{DayOfTheWeek: 6, HHMM: 1020, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}}, }, false, - "Sun-10:20,666k Mon-10:20,666k Tue-10:20,666k Wed-10:20,666k Thu-10:20,666k Fri-10:20,666k Sat-10:20,666k", + "Sun-10:20,666Ki Mon-10:20,666Ki Tue-10:20,666Ki Wed-10:20,666Ki Thu-10:20,666Ki Fri-10:20,666Ki Sat-10:20,666Ki", }, { "10:20,666:333", @@ -79,7 +79,7 @@ func TestBwTimetableSet(t *testing.T) { BwTimeSlot{DayOfTheWeek: 6, HHMM: 1020, Bandwidth: 
BwPair{Tx: 666 * 1024, Rx: 333 * 1024}}, }, false, - "Sun-10:20,666k:333k Mon-10:20,666k:333k Tue-10:20,666k:333k Wed-10:20,666k:333k Thu-10:20,666k:333k Fri-10:20,666k:333k Sat-10:20,666k:333k", + "Sun-10:20,666Ki:333Ki Mon-10:20,666Ki:333Ki Tue-10:20,666Ki:333Ki Wed-10:20,666Ki:333Ki Thu-10:20,666Ki:333Ki Fri-10:20,666Ki:333Ki Sat-10:20,666Ki:333Ki", }, { "11:00,333 13:40,666 23:50,10M 23:59,off", @@ -114,7 +114,7 @@ func TestBwTimetableSet(t *testing.T) { BwTimeSlot{DayOfTheWeek: 6, HHMM: 2359, Bandwidth: BwPair{Tx: -1, Rx: -1}}, }, false, - "Sun-11:00,333k Mon-11:00,333k Tue-11:00,333k Wed-11:00,333k Thu-11:00,333k Fri-11:00,333k Sat-11:00,333k Sun-13:40,666k Mon-13:40,666k Tue-13:40,666k Wed-13:40,666k Thu-13:40,666k Fri-13:40,666k Sat-13:40,666k Sun-23:50,10M Mon-23:50,10M Tue-23:50,10M Wed-23:50,10M Thu-23:50,10M Fri-23:50,10M Sat-23:50,10M Sun-23:59,off Mon-23:59,off Tue-23:59,off Wed-23:59,off Thu-23:59,off Fri-23:59,off Sat-23:59,off", + "Sun-11:00,333Ki Mon-11:00,333Ki Tue-11:00,333Ki Wed-11:00,333Ki Thu-11:00,333Ki Fri-11:00,333Ki Sat-11:00,333Ki Sun-13:40,666Ki Mon-13:40,666Ki Tue-13:40,666Ki Wed-13:40,666Ki Thu-13:40,666Ki Fri-13:40,666Ki Sat-13:40,666Ki Sun-23:50,10Mi Mon-23:50,10Mi Tue-23:50,10Mi Wed-23:50,10Mi Thu-23:50,10Mi Fri-23:50,10Mi Sat-23:50,10Mi Sun-23:59,off Mon-23:59,off Tue-23:59,off Wed-23:59,off Thu-23:59,off Fri-23:59,off Sat-23:59,off", }, { "11:00,333:666 13:40,666:off 23:50,10M:1M 23:59,off:10M", @@ -149,7 +149,7 @@ func TestBwTimetableSet(t *testing.T) { BwTimeSlot{DayOfTheWeek: 6, HHMM: 2359, Bandwidth: BwPair{Tx: -1, Rx: 10 * 1024 * 1024}}, }, false, - "Sun-11:00,333k:666k Mon-11:00,333k:666k Tue-11:00,333k:666k Wed-11:00,333k:666k Thu-11:00,333k:666k Fri-11:00,333k:666k Sat-11:00,333k:666k Sun-13:40,666k:off Mon-13:40,666k:off Tue-13:40,666k:off Wed-13:40,666k:off Thu-13:40,666k:off Fri-13:40,666k:off Sat-13:40,666k:off Sun-23:50,10M:1M Mon-23:50,10M:1M Tue-23:50,10M:1M Wed-23:50,10M:1M Thu-23:50,10M:1M Fri-23:50,10M:1M Sat-23:50,10M:1M Sun-23:59,off:10M Mon-23:59,off:10M Tue-23:59,off:10M Wed-23:59,off:10M Thu-23:59,off:10M Fri-23:59,off:10M Sat-23:59,off:10M", + "Sun-11:00,333Ki:666Ki Mon-11:00,333Ki:666Ki Tue-11:00,333Ki:666Ki Wed-11:00,333Ki:666Ki Thu-11:00,333Ki:666Ki Fri-11:00,333Ki:666Ki Sat-11:00,333Ki:666Ki Sun-13:40,666Ki:off Mon-13:40,666Ki:off Tue-13:40,666Ki:off Wed-13:40,666Ki:off Thu-13:40,666Ki:off Fri-13:40,666Ki:off Sat-13:40,666Ki:off Sun-23:50,10Mi:1Mi Mon-23:50,10Mi:1Mi Tue-23:50,10Mi:1Mi Wed-23:50,10Mi:1Mi Thu-23:50,10Mi:1Mi Fri-23:50,10Mi:1Mi Sat-23:50,10Mi:1Mi Sun-23:59,off:10Mi Mon-23:59,off:10Mi Tue-23:59,off:10Mi Wed-23:59,off:10Mi Thu-23:59,off:10Mi Fri-23:59,off:10Mi Sat-23:59,off:10Mi", }, { "Mon-11:00,333 Tue-13:40,666:333 Fri-00:00,10M Sat-10:00,off Sun-23:00,666", @@ -161,7 +161,7 @@ func TestBwTimetableSet(t *testing.T) { BwTimeSlot{DayOfTheWeek: 0, HHMM: 2300, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}}, }, false, - "Mon-11:00,333k Tue-13:40,666k:333k Fri-00:00,10M Sat-10:00,off Sun-23:00,666k", + "Mon-11:00,333Ki Tue-13:40,666Ki:333Ki Fri-00:00,10Mi Sat-10:00,off Sun-23:00,666Ki", }, { "Mon-11:00,333 Tue-13:40,666 Fri-00:00,10M 00:01,off Sun-23:00,666:off", @@ -179,7 +179,7 @@ func TestBwTimetableSet(t *testing.T) { BwTimeSlot{DayOfTheWeek: 0, HHMM: 2300, Bandwidth: BwPair{Tx: 666 * 1024, Rx: -1}}, }, false, - "Mon-11:00,333k Tue-13:40,666k Fri-00:00,10M Sun-00:01,off Mon-00:01,off Tue-00:01,off Wed-00:01,off Thu-00:01,off Fri-00:01,off Sat-00:01,off Sun-23:00,666k:off", + "Mon-11:00,333Ki Tue-13:40,666Ki 
Fri-00:00,10Mi Sun-00:01,off Mon-00:01,off Tue-00:01,off Wed-00:01,off Thu-00:01,off Fri-00:01,off Sat-00:01,off Sun-23:00,666Ki:off", }, { // from the docs @@ -222,7 +222,7 @@ func TestBwTimetableSet(t *testing.T) { BwTimeSlot{DayOfTheWeek: 6, HHMM: 2300, Bandwidth: BwPair{Tx: -1, Rx: -1}}, }, false, - "Sun-08:00,512k Mon-08:00,512k Tue-08:00,512k Wed-08:00,512k Thu-08:00,512k Fri-08:00,512k Sat-08:00,512k Sun-12:00,10M Mon-12:00,10M Tue-12:00,10M Wed-12:00,10M Thu-12:00,10M Fri-12:00,10M Sat-12:00,10M Sun-13:00,512k Mon-13:00,512k Tue-13:00,512k Wed-13:00,512k Thu-13:00,512k Fri-13:00,512k Sat-13:00,512k Sun-18:00,30M Mon-18:00,30M Tue-18:00,30M Wed-18:00,30M Thu-18:00,30M Fri-18:00,30M Sat-18:00,30M Sun-23:00,off Mon-23:00,off Tue-23:00,off Wed-23:00,off Thu-23:00,off Fri-23:00,off Sat-23:00,off", + "Sun-08:00,512Ki Mon-08:00,512Ki Tue-08:00,512Ki Wed-08:00,512Ki Thu-08:00,512Ki Fri-08:00,512Ki Sat-08:00,512Ki Sun-12:00,10Mi Mon-12:00,10Mi Tue-12:00,10Mi Wed-12:00,10Mi Thu-12:00,10Mi Fri-12:00,10Mi Sat-12:00,10Mi Sun-13:00,512Ki Mon-13:00,512Ki Tue-13:00,512Ki Wed-13:00,512Ki Thu-13:00,512Ki Fri-13:00,512Ki Sat-13:00,512Ki Sun-18:00,30Mi Mon-18:00,30Mi Tue-18:00,30Mi Wed-18:00,30Mi Thu-18:00,30Mi Fri-18:00,30Mi Sat-18:00,30Mi Sun-23:00,off Mon-23:00,off Tue-23:00,off Wed-23:00,off Thu-23:00,off Fri-23:00,off Sat-23:00,off", }, { // from the docs @@ -234,7 +234,7 @@ func TestBwTimetableSet(t *testing.T) { BwTimeSlot{DayOfTheWeek: 0, HHMM: 2000, Bandwidth: BwPair{Tx: -1, Rx: -1}}, }, false, - "Mon-00:00,512k Fri-23:59,10M Sat-10:00,1M Sun-20:00,off", + "Mon-00:00,512Ki Fri-23:59,10Mi Sat-10:00,1Mi Sun-20:00,off", }, { // from the docs @@ -251,7 +251,7 @@ func TestBwTimetableSet(t *testing.T) { BwTimeSlot{DayOfTheWeek: 0, HHMM: 2000, Bandwidth: BwPair{Tx: -1, Rx: -1}}, }, false, - "Mon-00:00,512k Sun-12:00,1M Mon-12:00,1M Tue-12:00,1M Wed-12:00,1M Thu-12:00,1M Fri-12:00,1M Sat-12:00,1M Sun-20:00,off", + "Mon-00:00,512Ki Sun-12:00,1Mi Mon-12:00,1Mi Tue-12:00,1Mi Wed-12:00,1Mi Thu-12:00,1Mi Fri-12:00,1Mi Sat-12:00,1Mi Sun-20:00,off", }, } { tt := BwTimetable{} @@ -537,13 +537,13 @@ func TestBwTimetableMarshalJSON(t *testing.T) { BwTimetable{ BwTimeSlot{DayOfTheWeek: 0, HHMM: 0, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}}, }, - `"666k"`, + `"666Ki"`, }, { BwTimetable{ BwTimeSlot{DayOfTheWeek: 0, HHMM: 0, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 333 * 1024}}, }, - `"666k:333k"`, + `"666Ki:333Ki"`, }, { BwTimetable{ @@ -555,7 +555,7 @@ func TestBwTimetableMarshalJSON(t *testing.T) { BwTimeSlot{DayOfTheWeek: 5, HHMM: 1020, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}}, BwTimeSlot{DayOfTheWeek: 6, HHMM: 1020, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}}, }, - `"Sun-10:20,666k Mon-10:20,666k Tue-10:20,666k Wed-10:20,666k Thu-10:20,666k Fri-10:20,666k Sat-10:20,666k"`, + `"Sun-10:20,666Ki Mon-10:20,666Ki Tue-10:20,666Ki Wed-10:20,666Ki Thu-10:20,666Ki Fri-10:20,666Ki Sat-10:20,666Ki"`, }, } { got, err := json.Marshal(test.in) diff --git a/fs/config/configflags/configflags.go b/fs/config/configflags/configflags.go index 988d5c8fe..7a00c14f1 100644 --- a/fs/config/configflags/configflags.go +++ b/fs/config/configflags/configflags.go @@ -97,8 +97,8 @@ func AddFlags(ci *fs.ConfigInfo, flagSet *pflag.FlagSet) { flags.IntVarP(flagSet, &ci.StatsFileNameLength, "stats-file-name-length", "", ci.StatsFileNameLength, "Max file name length in stats. 
0 for no limit") flags.FVarP(flagSet, &ci.LogLevel, "log-level", "", "Log level DEBUG|INFO|NOTICE|ERROR") flags.FVarP(flagSet, &ci.StatsLogLevel, "stats-log-level", "", "Log level to show --stats output DEBUG|INFO|NOTICE|ERROR") - flags.FVarP(flagSet, &ci.BwLimit, "bwlimit", "", "Bandwidth limit in kByte/s, or use suffix b|k|M|G or a full timetable.") - flags.FVarP(flagSet, &ci.BwLimitFile, "bwlimit-file", "", "Bandwidth limit per file in kByte/s, or use suffix b|k|M|G or a full timetable.") + flags.FVarP(flagSet, &ci.BwLimit, "bwlimit", "", "Bandwidth limit in KiByte/s, or use suffix B|K|M|G|T|P or a full timetable.") + flags.FVarP(flagSet, &ci.BwLimitFile, "bwlimit-file", "", "Bandwidth limit per file in KiByte/s, or use suffix B|K|M|G|T|P or a full timetable.") flags.FVarP(flagSet, &ci.BufferSize, "buffer-size", "", "In memory buffer size when reading files for each --transfer.") flags.FVarP(flagSet, &ci.StreamingUploadCutoff, "streaming-upload-cutoff", "", "Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends.") flags.FVarP(flagSet, &ci.Dump, "dump", "", "List of items to dump from: "+fs.DumpFlagsList) diff --git a/fs/config/ui.go b/fs/config/ui.go index a67a8bbad..e3874852e 100644 --- a/fs/config/ui.go +++ b/fs/config/ui.go @@ -353,7 +353,7 @@ func ChooseOption(o *fs.Option, name string) string { case bool: what = "boolean value (true or false)" case fs.SizeSuffix: - what = "size with suffix k,M,G,T" + what = "size with suffix K,M,G,T" case fs.Duration: what = "duration s,m,h,d,w,M,y" case int, int8, int16, int32, int64: diff --git a/fs/filter/filterflags/filterflags.go b/fs/filter/filterflags/filterflags.go index 3a3d3e4c7..e926fd894 100644 --- a/fs/filter/filterflags/filterflags.go +++ b/fs/filter/filterflags/filterflags.go @@ -41,8 +41,8 @@ func AddFlags(flagSet *pflag.FlagSet) { flags.StringArrayVarP(flagSet, &Opt.FilesFromRaw, "files-from-raw", "", nil, "Read list of source-file names from file without any processing of lines (use - to read from stdin)") flags.FVarP(flagSet, &Opt.MinAge, "min-age", "", "Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y") flags.FVarP(flagSet, &Opt.MaxAge, "max-age", "", "Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y") - flags.FVarP(flagSet, &Opt.MinSize, "min-size", "", "Only transfer files bigger than this in k or suffix b|k|M|G") - flags.FVarP(flagSet, &Opt.MaxSize, "max-size", "", "Only transfer files smaller than this in k or suffix b|k|M|G") + flags.FVarP(flagSet, &Opt.MinSize, "min-size", "", "Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P") + flags.FVarP(flagSet, &Opt.MaxSize, "max-size", "", "Only transfer files smaller than this in KiB or suffix B|K|M|G|T|P") flags.BoolVarP(flagSet, &Opt.IgnoreCase, "ignore-case", "", false, "Ignore case in filters (case insensitive)") //cvsExclude = BoolP("cvs-exclude", "C", false, "Exclude files in the same way CVS does") } diff --git a/fs/fs_test.go b/fs/fs_test.go index 0ad09cf07..1f7aba038 100644 --- a/fs/fs_test.go +++ b/fs/fs_test.go @@ -99,7 +99,7 @@ func TestOption(t *testing.T) { Name: "potato", Value: SizeSuffix(17 << 20), } - assert.Equal(t, "17M", d.String()) + assert.Equal(t, "17Mi", d.String()) assert.Equal(t, "SizeSuffix", d.Type()) err := d.Set("18M") assert.NoError(t, err) diff --git a/fs/rc/jobs/job_test.go b/fs/rc/jobs/job_test.go index 29cb35009..688ca04e8 100644 --- a/fs/rc/jobs/job_test.go +++ b/fs/rc/jobs/job_test.go @@ -257,7 +257,7 @@ func 
TestExecuteJobWithConfig(t *testing.T) { called := false jobFn := func(ctx context.Context, in rc.Params) (rc.Params, error) { ci := fs.GetConfig(ctx) - assert.Equal(t, 42*fs.MebiByte, ci.BufferSize) + assert.Equal(t, 42*fs.Mebi, ci.BufferSize) called = true return nil, nil } @@ -278,7 +278,7 @@ func TestExecuteJobWithConfig(t *testing.T) { assert.Equal(t, true, called) // Check that wasn't the default ci := fs.GetConfig(ctx) - assert.NotEqual(t, 42*fs.MebiByte, ci.BufferSize) + assert.NotEqual(t, 42*fs.Mebi, ci.BufferSize) } func TestExecuteJobWithFilter(t *testing.T) { diff --git a/fs/sizesuffix.go b/fs/sizesuffix.go index 22412ebbf..72592d0c2 100644 --- a/fs/sizesuffix.go +++ b/fs/sizesuffix.go @@ -1,6 +1,6 @@ package fs -// SizeSuffix is parsed by flag with k/M/G suffixes +// SizeSuffix is parsed by flag with K/M/G binary suffixes import ( "encoding/json" "fmt" @@ -17,13 +17,21 @@ type SizeSuffix int64 // Common multipliers for SizeSuffix const ( - Byte SizeSuffix = 1 << (iota * 10) - KibiByte - MebiByte - GibiByte - TebiByte - PebiByte - ExbiByte + SizeSuffixBase SizeSuffix = 1 << (iota * 10) + Kibi + Mebi + Gibi + Tebi + Pebi + Exbi +) +const ( + // SizeSuffixMax is the largest SizeSuffix multiplier + SizeSuffixMax = Exbi + // SizeSuffixMaxValue is the largest value that can be used to create SizeSuffix + SizeSuffixMaxValue = math.MaxInt64 + // SizeSuffixMinValue is the smallest value that can be used to create SizeSuffix + SizeSuffixMinValue = math.MinInt64 ) // Turn SizeSuffix into a string and a suffix @@ -35,24 +43,27 @@ func (x SizeSuffix) string() (string, string) { return "off", "" case x == 0: return "0", "" - case x < 1<<10: + case x < Kibi: scaled = float64(x) suffix = "" - case x < 1<<20: - scaled = float64(x) / (1 << 10) - suffix = "k" - case x < 1<<30: - scaled = float64(x) / (1 << 20) - suffix = "M" - case x < 1<<40: - scaled = float64(x) / (1 << 30) - suffix = "G" - case x < 1<<50: - scaled = float64(x) / (1 << 40) - suffix = "T" + case x < Mebi: + scaled = float64(x) / float64(Kibi) + suffix = "Ki" + case x < Gibi: + scaled = float64(x) / float64(Mebi) + suffix = "Mi" + case x < Tebi: + scaled = float64(x) / float64(Gibi) + suffix = "Gi" + case x < Pebi: + scaled = float64(x) / float64(Tebi) + suffix = "Ti" + case x < Exbi: + scaled = float64(x) / float64(Pebi) + suffix = "Pi" default: - scaled = float64(x) / (1 << 50) - suffix = "P" + scaled = float64(x) / float64(Exbi) + suffix = "Ei" } if math.Floor(scaled) == scaled { return fmt.Sprintf("%.0f", scaled), suffix @@ -67,12 +78,67 @@ func (x SizeSuffix) String() string { } // Unit turns SizeSuffix into a string with a unit -func (x SizeSuffix) Unit(unit string) string { +func (x SizeSuffix) unit(unit string) string { val, suffix := x.string() if val == "off" { return val } - return val + " " + suffix + unit + var suffixUnit string + if suffix != "" && unit != "" { + suffixUnit = suffix + unit + } else { + suffixUnit = suffix + unit + } + return val + " " + suffixUnit +} + +// BitUnit turns SizeSuffix into a string with bit unit +func (x SizeSuffix) BitUnit() string { + return x.unit("bit") +} + +// BitRateUnit turns SizeSuffix into a string with bit rate unit +func (x SizeSuffix) BitRateUnit() string { + return x.unit("bit/s") +} + +// ByteUnit turns SizeSuffix into a string with byte unit +func (x SizeSuffix) ByteUnit() string { + return x.unit("Byte") +} + +// ByteRateUnit turns SizeSuffix into a string with byte rate unit +func (x SizeSuffix) ByteRateUnit() string { + return x.unit("Byte/s") +} + +// ByteShortUnit 
turns SizeSuffix into a string with byte unit short form +func (x SizeSuffix) ByteShortUnit() string { + return x.unit("B") +} + +// ByteRateShortUnit turns SizeSuffix into a string with byte rate unit short form +func (x SizeSuffix) ByteRateShortUnit() string { + return x.unit("B/s") +} + +func (x *SizeSuffix) multiplierFromSymbol(s byte) (found bool, multiplier float64) { + switch s { + case 'k', 'K': + return true, float64(Kibi) + case 'm', 'M': + return true, float64(Mebi) + case 'g', 'G': + return true, float64(Gibi) + case 't', 'T': + return true, float64(Tebi) + case 'p', 'P': + return true, float64(Pebi) + case 'e', 'E': + return true, float64(Exbi) + default: + return false, float64(SizeSuffixBase) + } } // Set a SizeSuffix @@ -86,25 +152,42 @@ func (x *SizeSuffix) Set(s string) error { } suffix := s[len(s)-1] suffixLen := 1 + multiplierFound := false var multiplier float64 switch suffix { case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.': suffixLen = 0 - multiplier = 1 << 10 + multiplier = float64(Kibi) case 'b', 'B': - multiplier = 1 - case 'k', 'K': - multiplier = 1 << 10 - case 'm', 'M': - multiplier = 1 << 20 - case 'g', 'G': - multiplier = 1 << 30 - case 't', 'T': - multiplier = 1 << 40 - case 'p', 'P': - multiplier = 1 << 50 + if len(s) > 2 && s[len(s)-2] == 'i' { + suffix = s[len(s)-3] + suffixLen = 3 + if multiplierFound, multiplier = x.multiplierFromSymbol(suffix); !multiplierFound { + return errors.Errorf("bad suffix %q", suffix) + } + // Could also support SI form MB, and treat it equivalent to MiB, but perhaps better to reserve it for CountSuffix? + //} else if len(s) > 1 { + // suffix = s[len(s)-2] + // if multiplierFound, multiplier = x.multiplierFromSymbol(suffix); multiplierFound { + // suffixLen = 2 + // } + //} + } else { + multiplier = float64(SizeSuffixBase) + } + case 'i', 'I': + if len(s) > 1 { + suffix = s[len(s)-2] + suffixLen = 2 + multiplierFound, multiplier = x.multiplierFromSymbol(suffix) + } + if !multiplierFound { + return errors.Errorf("bad suffix %q", suffix) + } default: - return errors.Errorf("bad suffix %q", suffix) + if multiplierFound, multiplier = x.multiplierFromSymbol(suffix); !multiplierFound { + return errors.Errorf("bad suffix %q", suffix) + } } s = s[:len(s)-suffixLen] value, err := strconv.ParseFloat(s, 64) diff --git a/fs/sizesuffix_test.go b/fs/sizesuffix_test.go index 2497a1700..e215c74db 100644 --- a/fs/sizesuffix_test.go +++ b/fs/sizesuffix_test.go @@ -27,11 +27,11 @@ func TestSizeSuffixString(t *testing.T) { }{ {0, "0"}, {102, "102"}, - {1024, "1k"}, - {1024 * 1024, "1M"}, - {1024 * 1024 * 1024, "1G"}, - {10 * 1024 * 1024 * 1024, "10G"}, - {10.1 * 1024 * 1024 * 1024, "10.100G"}, + {1024, "1Ki"}, + {1024 * 1024, "1Mi"}, + {1024 * 1024 * 1024, "1Gi"}, + {10 * 1024 * 1024 * 1024, "10Gi"}, + {10.1 * 1024 * 1024 * 1024, "10.100Gi"}, {-1, "off"}, {-100, "off"}, } { @@ -41,26 +41,73 @@ func TestSizeSuffixString(t *testing.T) { } } -func TestSizeSuffixUnit(t *testing.T) { +func TestSizeSuffixByteShortUnit(t *testing.T) { for _, test := range []struct { in float64 want string }{ - {0, "0 Bytes"}, - {102, "102 Bytes"}, - {1024, "1 kBytes"}, - {1024 * 1024, "1 MBytes"}, - {1024 * 1024 * 1024, "1 GBytes"}, - {10 * 1024 * 1024 * 1024, "10 GBytes"}, - {10.1 * 1024 * 1024 * 1024, "10.100 GBytes"}, - {10 * 1024 * 1024 * 1024 * 1024, "10 TBytes"}, - {10 * 1024 * 1024 * 1024 * 1024 * 1024, "10 PBytes"}, - {1 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024, "1024 PBytes"}, + {0, "0 B"}, + {102, "102 B"}, + {1024, "1 KiB"}, + {1024 * 1024, "1 
MiB"}, + {1024 * 1024 * 1024, "1 GiB"}, + {10 * 1024 * 1024 * 1024, "10 GiB"}, + {10.1 * 1024 * 1024 * 1024, "10.100 GiB"}, + {10 * 1024 * 1024 * 1024 * 1024, "10 TiB"}, + {10 * 1024 * 1024 * 1024 * 1024 * 1024, "10 PiB"}, + {1 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024, "1 EiB"}, {-1, "off"}, {-100, "off"}, } { ss := SizeSuffix(test.in) - got := ss.Unit("Bytes") + got := ss.ByteShortUnit() + assert.Equal(t, test.want, got) + } +} + +func TestSizeSuffixByteUnit(t *testing.T) { + for _, test := range []struct { + in float64 + want string + }{ + {0, "0 Byte"}, + {102, "102 Byte"}, + {1024, "1 KiByte"}, + {1024 * 1024, "1 MiByte"}, + {1024 * 1024 * 1024, "1 GiByte"}, + {10 * 1024 * 1024 * 1024, "10 GiByte"}, + {10.1 * 1024 * 1024 * 1024, "10.100 GiByte"}, + {10 * 1024 * 1024 * 1024 * 1024, "10 TiByte"}, + {10 * 1024 * 1024 * 1024 * 1024 * 1024, "10 PiByte"}, + {1 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024, "1 EiByte"}, + {-1, "off"}, + {-100, "off"}, + } { + ss := SizeSuffix(test.in) + got := ss.ByteUnit() + assert.Equal(t, test.want, got) + } +} + +func TestSizeSuffixBitRateUnit(t *testing.T) { + for _, test := range []struct { + in float64 + want string + }{ + {0, "0 bit/s"}, + {1024, "1 Kibit/s"}, + {1024 * 1024, "1 Mibit/s"}, + {1024 * 1024 * 1024, "1 Gibit/s"}, + {10 * 1024 * 1024 * 1024, "10 Gibit/s"}, + {10.1 * 1024 * 1024 * 1024, "10.100 Gibit/s"}, + {10 * 1024 * 1024 * 1024 * 1024, "10 Tibit/s"}, + {10 * 1024 * 1024 * 1024 * 1024 * 1024, "10 Pibit/s"}, + {1 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024, "1 Eibit/s"}, + {-1, "off"}, + {-100, "off"}, + } { + ss := SizeSuffix(test.in) + got := ss.BitRateUnit() assert.Equal(t, test.want, got) } } @@ -77,9 +124,23 @@ func TestSizeSuffixSet(t *testing.T) { {"0.1k", 102, false}, {"0.1", 102, false}, {"1K", 1024, false}, + {"1k", 1024, false}, + //{"1KB", 1024, false}, + //{"1kB", 1024, false}, + //{"1kb", 1024, false}, + {"1KI", 1024, false}, + {"1Ki", 1024, false}, + {"1kI", 1024, false}, + {"1ki", 1024, false}, + {"1KiB", 1024, false}, + {"1KiB", 1024, false}, + {"1kib", 1024, false}, {"1", 1024, false}, {"2.5", 1024 * 2.5, false}, {"1M", 1024 * 1024, false}, + //{"1MB", 1024 * 1024, false}, + {"1Mi", 1024 * 1024, false}, + {"1MiB", 1024 * 1024, false}, {"1.g", 1024 * 1024 * 1024, false}, {"10G", 10 * 1024 * 1024 * 1024, false}, {"10T", 10 * 1024 * 1024 * 1024 * 1024, false}, @@ -91,6 +152,9 @@ func TestSizeSuffixSet(t *testing.T) { {"1.q", 0, true}, {"1q", 0, true}, {"-1K", 0, true}, + {"1i", 0, true}, + {"1iB", 0, true}, + {"1MB", 0, true}, } { ss := SizeSuffix(0) err := ss.Set(test.in) diff --git a/fstest/fstests/fstests.go b/fstest/fstests/fstests.go index 332226775..a5fc12e3e 100644 --- a/fstest/fstests/fstests.go +++ b/fstest/fstests/fstests.go @@ -1792,7 +1792,7 @@ func Run(t *testing.T, opt *Opt) { minChunkSize = opt.ChunkedUpload.CeilChunkSize(minChunkSize) } - maxChunkSize := 2 * fs.MebiByte + maxChunkSize := 2 * fs.Mebi if maxChunkSize < 2*minChunkSize { maxChunkSize = 2 * minChunkSize } diff --git a/vfs/vfs.go b/vfs/vfs.go index e4533fa35..ab506f9ad 100644 --- a/vfs/vfs.go +++ b/vfs/vfs.go @@ -543,7 +543,7 @@ func fillInMissingSizes(total, used, free, unknownFree int64) (newTotal, newUsed return total, used, free } -// If the total size isn't known then we will aim for this many bytes free (1PB) +// If the total size isn't known then we will aim for this many bytes free (1 PiB) const unknownFreeBytes = 1 << 50 // Statfs returns into about the filing system if known diff --git a/vfs/vfscache/downloaders/downloaders.go 
b/vfs/vfscache/downloaders/downloaders.go index 9973d0cef..94c4133d8 100644 --- a/vfs/vfscache/downloaders/downloaders.go +++ b/vfs/vfscache/downloaders/downloaders.go @@ -593,7 +593,7 @@ func (dl *downloader) _stop() { // stop the downloader by stopping the async reader buffering // any more input. This causes all the stuff in the async - // buffer (which can be many MB) to be written to the disk + // buffer (which can be many MiB) to be written to the disk // before exiting. if dl.in != nil { dl.in.StopBuffering() diff --git a/vfs/vfscommon/options.go b/vfs/vfscommon/options.go index 22ea117d6..36476d417 100644 --- a/vfs/vfscommon/options.go +++ b/vfs/vfscommon/options.go @@ -51,13 +51,13 @@ var DefaultOpt = Options{ CacheMode: CacheModeOff, CacheMaxAge: 3600 * time.Second, CachePollInterval: 60 * time.Second, - ChunkSize: 128 * fs.MebiByte, + ChunkSize: 128 * fs.Mebi, ChunkSizeLimit: -1, CacheMaxSize: -1, CaseInsensitive: runtime.GOOS == "windows" || runtime.GOOS == "darwin", // default to true on Windows and Mac, false otherwise WriteWait: 1000 * time.Millisecond, ReadWait: 20 * time.Millisecond, WriteBack: 5 * time.Second, - ReadAhead: 0 * fs.MebiByte, + ReadAhead: 0 * fs.Mebi, UsedIsSize: false, }
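
As a rough illustration of the renamed size constants and the new unit
helpers introduced above, here is a minimal sketch (not part of the diff;
it assumes the patched `fs` package, with behaviour as pinned down by the
test expectations above):

```
package main

import (
	"fmt"

	"github.com/rclone/rclone/fs"
)

func main() {
	// fs.Mebi replaces the old fs.MebiByte constant.
	size := 128 * fs.Mebi

	fmt.Println(size.String())        // "128Mi"
	fmt.Println(size.ByteShortUnit()) // "128 MiB"
	fmt.Println(size.ByteRateUnit())  // "128 MiByte/s"

	// The parser accepts the explicit binary suffixes...
	var ss fs.SizeSuffix
	if err := ss.Set("1KiB"); err == nil {
		fmt.Println(int64(ss)) // 1024
	}

	// ...while an SI-style "1MB" is rejected, per the test table above.
	if err := ss.Set("1MB"); err != nil {
		fmt.Println("rejected:", err)
	}
}
```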