From 98579608ecd03ec3d02c571e92bb7b75586d63e4 Mon Sep 17 00:00:00 2001 From: albertony <12441419+albertony@users.noreply.github.com> Date: Wed, 7 Apr 2021 12:23:42 +0200 Subject: [PATCH] docs: cleanup spelling of size and rate units --- cmd/cmd.go | 2 +- docs/content/docs.md | 36 ++++++++++++++-------------- docs/content/drive.md | 2 +- docs/content/filtering.md | 4 ++-- docs/content/flags.md | 4 ++-- docs/content/rc.md | 4 ++-- fs/accounting/accounting.go | 2 +- fs/accounting/prometheus.go | 2 +- fs/accounting/stats_groups.go | 6 ++--- fs/accounting/token_bucket.go | 6 ++--- fs/config/configflags/configflags.go | 4 ++-- fs/sync/sync_test.go | 2 +- 12 files changed, 37 insertions(+), 37 deletions(-) diff --git a/cmd/cmd.go b/cmd/cmd.go index fd04ca828..716889d5a 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -49,7 +49,7 @@ var ( cpuProfile = flags.StringP("cpuprofile", "", "", "Write cpu profile to file") memProfile = flags.StringP("memprofile", "", "", "Write memory profile to file") statsInterval = flags.DurationP("stats", "", time.Minute*1, "Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable)") - dataRateUnit = flags.StringP("stats-unit", "", "bytes", "Show data rate in stats as either 'bits' or 'bytes'/s") + dataRateUnit = flags.StringP("stats-unit", "", "bytes", "Show data rate in stats as either 'bits' or 'bytes' per second") version bool retries = flags.IntP("retries", "", 3, "Retry operations this many times if they fail") retriesInterval = flags.DurationP("retries-sleep", "", 0, "Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)") diff --git a/docs/content/docs.md b/docs/content/docs.md index 8892b91e5..a382e8db3 100644 --- a/docs/content/docs.md +++ b/docs/content/docs.md @@ -422,8 +422,8 @@ fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". Options which use SIZE use kByte by default. However, a suffix of `b` -for bytes, `k` for kBytes, `M` for MBytes, `G` for GBytes, `T` for -TBytes and `P` for PBytes may be used. These are the binary units, e.g. +for bytes, `k` for kByte, `M` for MByte, `G` for GByte, `T` for +TByte and `P` for PByte may be used. These are the binary units, e.g. 1, 2\*\*10, 2\*\*20, 2\*\*30 respectively. ### --backup-dir=DIR ### @@ -469,7 +469,7 @@ This option controls the bandwidth limit. For example would mean limit the upload and download bandwidth to 10 MByte/s. **NB** this is **bytes** per second not **bits** per second. To use a -single limit, specify the desired bandwidth in kBytes/s, or use a +single limit, specify the desired bandwidth in kByte/s, or use a suffix b|k|M|G. The default is `0` which means to not limit bandwidth. The upload and download bandwidth can be specified seperately, as @@ -483,7 +483,7 @@ to just limit the upload bandwidth you would use --bwlimit 10M:off -this would limit the upload bandwidth to 10MByte/s but the download +this would limit the upload bandwidth to 10 MByte/s but the download bandwidth would be unlimited. When specified as above the bandwidth limits last for the duration of @@ -505,19 +505,19 @@ working hours could be: `--bwlimit "08:00,512k 12:00,10M 13:00,512k 18:00,30M 23:00,off"` -In this example, the transfer bandwidth will be set to 512kBytes/sec -at 8am every day. At noon, it will rise to 10MByte/s, and drop back -to 512kBytes/sec at 1pm. At 6pm, the bandwidth limit will be set to -30MByte/s, and at 11pm it will be completely disabled (full speed). 
+In this example, the transfer bandwidth will be set to 512 kByte/s +at 8am every day. At noon, it will rise to 10 MByte/s, and drop back +to 512 kByte/s at 1pm. At 6pm, the bandwidth limit will be set to +30 MByte/s, and at 11pm it will be completely disabled (full speed). Anything between 11pm and 8am will remain unlimited. An example of timetable with `WEEKDAY` could be: `--bwlimit "Mon-00:00,512 Fri-23:59,10M Sat-10:00,1M Sun-20:00,off"` -It means that, the transfer bandwidth will be set to 512kBytes/sec on -Monday. It will rise to 10MByte/s before the end of Friday. At 10:00 -on Saturday it will be set to 1MByte/s. From 20:00 on Sunday it will +It means that, the transfer bandwidth will be set to 512 kByte/s on +Monday. It will rise to 10 MByte/s before the end of Friday. At 10:00 +on Saturday it will be set to 1 MByte/s. From 20:00 on Sunday it will be unlimited. Timeslots without `WEEKDAY` are extended to the whole week. So this @@ -533,10 +533,10 @@ Bandwidth limit apply to the data transfer for all backends. For most backends the directory listing bandwidth is also included (exceptions being the non HTTP backends, `ftp`, `sftp` and `tardigrade`). -Note that the units are **Bytes/s**, not **Bits/s**. Typically -connections are measured in Bits/s - to convert divide by 8. For +Note that the units are **Byte/s**, not **bit/s**. Typically +connections are measured in bit/s - to convert divide by 8. For example, let's say you have a 10 Mbit/s connection and you wish rclone -to use half of it - 5 Mbit/s. This is 5/8 = 0.625MByte/s so you would +to use half of it - 5 Mbit/s. This is 5/8 = 0.625 MByte/s so you would use a `--bwlimit 0.625M` parameter for rclone. On Unix systems (Linux, macOS, …) the bandwidth limiter can be toggled by @@ -557,7 +557,7 @@ change the bwlimit dynamically: This option controls per file bandwidth limit. For the options see the `--bwlimit` flag. -For example use this to allow no transfers to be faster than 1MByte/s +For example use this to allow no transfers to be faster than 1 MByte/s --bwlimit-file 1M @@ -1473,14 +1473,14 @@ date formatting syntax. ### --stats-unit=bits|bytes ### -By default, data transfer rates will be printed in bytes/second. +By default, data transfer rates will be printed in bytes per second. -This option allows the data rate to be printed in bits/second. +This option allows the data rate to be printed in bits per second. Data transfer volume will still be reported in bytes. The rate is reported as a binary unit, not SI unit. So 1 Mbit/s -equals 1,048,576 bits/s and not 1,000,000 bits/s. +equals 1,048,576 bit/s and not 1,000,000 bit/s. The default is `bytes`. diff --git a/docs/content/drive.md b/docs/content/drive.md index 4e81fc952..c2c434840 100644 --- a/docs/content/drive.md +++ b/docs/content/drive.md @@ -1226,7 +1226,7 @@ Use the -i flag to see what would be copied before copying. Drive has quite a lot of rate limiting. This causes rclone to be limited to transferring about 2 files per second only. Individual -files may be transferred much faster at 100s of MBytes/s but lots of +files may be transferred much faster at 100s of MByte/s but lots of small files can take a long time. Server side copies are also subject to a separate rate limit. If you diff --git a/docs/content/filtering.md b/docs/content/filtering.md index 4fc4ae311..1119c340a 100644 --- a/docs/content/filtering.md +++ b/docs/content/filtering.md @@ -588,7 +588,7 @@ remote or flag value. The fix then is to quote values containing spaces. 
Controls the minimum size file within the scope of an rclone command. Default units are `kBytes` but abbreviations `k`, `M`, or `G` are valid. -E.g. `rclone ls remote: --min-size 50k` lists files on `remote:` of 50kByte +E.g. `rclone ls remote: --min-size 50k` lists files on `remote:` of 50 kByte size or larger. ### `--max-size` - Don't transfer any file larger than this @@ -596,7 +596,7 @@ size or larger. Controls the maximum size file within the scope of an rclone command. Default units are `kBytes` but abbreviations `k`, `M`, or `G` are valid. -E.g. `rclone ls remote: --max-size 1G` lists files on `remote:` of 1GByte +E.g. `rclone ls remote: --max-size 1G` lists files on `remote:` of 1 GByte size or smaller. ### `--max-age` - Don't transfer any file older than this diff --git a/docs/content/flags.md b/docs/content/flags.md index 48d37d4ea..aeb20483e 100755 --- a/docs/content/flags.md +++ b/docs/content/flags.md @@ -18,8 +18,8 @@ These flags are available for every command. --backup-dir string Make backups into hierarchy based in DIR. --bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name. --buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M) - --bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable. - --bwlimit-file BwTimetable Bandwidth limit per file in kBytes/s, or use suffix b|k|M|G or a full timetable. + --bwlimit BwTimetable Bandwidth limit in kByte/s, or use suffix b|k|M|G or a full timetable. + --bwlimit-file BwTimetable Bandwidth limit per file in kByte/s, or use suffix b|k|M|G or a full timetable. --ca-cert string CA certificate used to verify servers --cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone") --check-first Do all the checks before starting transfers. 
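The SIZE options in docs.md and filtering.md above, and the --bwlimit flags in flags.md, all lean on the suffix convention spelled out earlier in the patch: a bare number is read as kByte, and `b`, `k`, `M`, `G`, `T`, `P` are binary multipliers (1, 2^10, 2^20, 2^30, ...). A minimal sketch of that arithmetic, using a hypothetical parseSize helper rather than rclone's actual fs.SizeSuffix parser:

```go
package main

import (
	"fmt"
	"strconv"
)

// parseSize illustrates the suffix rules the docs describe: a bare number
// means kByte, and b|k|M|G|T|P select binary multipliers
// (1, 2^10, 2^20, 2^30, 2^40, 2^50 bytes respectively).
func parseSize(s string) (int64, error) {
	if s == "" {
		return 0, fmt.Errorf("empty size")
	}
	multipliers := map[byte]float64{
		'b': 1, 'k': 1 << 10, 'M': 1 << 20,
		'G': 1 << 30, 'T': 1 << 40, 'P': 1 << 50,
	}
	mult := float64(1 << 10) // no suffix: the value is taken as kByte
	if m, ok := multipliers[s[len(s)-1]]; ok {
		mult = m
		s = s[:len(s)-1]
	}
	n, err := strconv.ParseFloat(s, 64)
	if err != nil {
		return 0, err
	}
	return int64(n * mult), nil
}

func main() {
	for _, v := range []string{"50k", "1G", "0.625M", "100"} {
		n, _ := parseSize(v)
		fmt.Printf("%-7s -> %d bytes\n", v, n)
	}
}
```

Under these rules `--min-size 50k` means 51,200 bytes and `--bwlimit 0.625M` means 655,360 Byte/s, the figure behind the 10 Mbit/s example above.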
diff --git a/docs/content/rc.md b/docs/content/rc.md index db65dc56a..f5082f375 100644 --- a/docs/content/rc.md +++ b/docs/content/rc.md @@ -788,8 +788,8 @@ Returns the following values: "eta": estimated time in seconds until file transfer completion "name": name of the file, "percentage": progress of the file transfer in percent, - "speed": average speed over the whole transfer in bytes/sec, - "speedAvg": current speed in bytes/sec as an exponentially weighted moving average, + "speed": average speed over the whole transfer in bytes per second, + "speedAvg": current speed in bytes per second as an exponentially weighted moving average, "size": size of the file in bytes } ], diff --git a/fs/accounting/accounting.go b/fs/accounting/accounting.go index b82480068..ef079e36c 100644 --- a/fs/accounting/accounting.go +++ b/fs/accounting/accounting.go @@ -74,7 +74,7 @@ type accountValues struct { start time.Time // Start time of first read lpTime time.Time // Time of last average measurement lpBytes int // Number of bytes read since last measurement - avg float64 // Moving average of last few measurements in bytes/s + avg float64 // Moving average of last few measurements in Byte/s } const averagePeriod = 16 // period to do exponentially weighted averages over diff --git a/fs/accounting/prometheus.go b/fs/accounting/prometheus.go index 9138d9120..385a94e62 100644 --- a/fs/accounting/prometheus.go +++ b/fs/accounting/prometheus.go @@ -32,7 +32,7 @@ func NewRcloneCollector(ctx context.Context) *RcloneCollector { nil, nil, ), transferSpeed: prometheus.NewDesc(namespace+"speed", - "Average speed in bytes/sec since the start of the Rclone process", + "Average speed in bytes per second since the start of the Rclone process", nil, nil, ), numOfErrors: prometheus.NewDesc(namespace+"errors_total", diff --git a/fs/accounting/stats_groups.go b/fs/accounting/stats_groups.go index 49657fc83..7be1597a3 100644 --- a/fs/accounting/stats_groups.go +++ b/fs/accounting/stats_groups.go @@ -96,7 +96,7 @@ Returns the following values: "lastError": last error string, "renames" : number of files renamed, "retryError": boolean showing whether there has been at least one non-NoRetryError, - "speed": average speed in bytes/sec since start of the group, + "speed": average speed in bytes per second since start of the group, "totalBytes": total number of bytes in the group, "totalChecks": total number of checks in the group, "totalTransfers": total number of transfers in the group, @@ -109,8 +109,8 @@ Returns the following values: "eta": estimated time in seconds until file transfer completion "name": name of the file, "percentage": progress of the file transfer in percent, - "speed": average speed over the whole transfer in bytes/sec, - "speedAvg": current speed in bytes/sec as an exponentially weighted moving average, + "speed": average speed over the whole transfer in bytes per second, + "speedAvg": current speed in bytes per second as an exponentially weighted moving average, "size": size of the file in bytes } ], diff --git a/fs/accounting/token_bucket.go b/fs/accounting/token_bucket.go index cafd55e14..0d9cad03c 100644 --- a/fs/accounting/token_bucket.go +++ b/fs/accounting/token_bucket.go @@ -92,7 +92,7 @@ func (tb *tokenBucket) StartTokenBucket(ctx context.Context) { tb.currLimit = ci.BwLimit.LimitAt(time.Now()) if tb.currLimit.Bandwidth.IsSet() { tb.curr = newTokenBucket(tb.currLimit.Bandwidth) - fs.Infof(nil, "Starting bandwidth limiter at %vBytes/s", &tb.currLimit.Bandwidth) + fs.Infof(nil, "Starting bandwidth 
limiter at %v Byte/s", &tb.currLimit.Bandwidth) // Start the SIGUSR2 signal handler to toggle bandwidth. // This function does nothing in windows systems. @@ -133,9 +133,9 @@ func (tb *tokenBucket) StartTokenTicker(ctx context.Context) { *targetBucket = newTokenBucket(limitNow.Bandwidth) if tb.toggledOff { fs.Logf(nil, "Scheduled bandwidth change. "+ - "Limit will be set to %vBytes/s when toggled on again.", &limitNow.Bandwidth) + "Limit will be set to %v Byte/s when toggled on again.", &limitNow.Bandwidth) } else { - fs.Logf(nil, "Scheduled bandwidth change. Limit set to %vBytes/s", &limitNow.Bandwidth) + fs.Logf(nil, "Scheduled bandwidth change. Limit set to %v Byte/s", &limitNow.Bandwidth) } } else { targetBucket._setOff() diff --git a/fs/config/configflags/configflags.go b/fs/config/configflags/configflags.go index c7b0d555a..988d5c8fe 100644 --- a/fs/config/configflags/configflags.go +++ b/fs/config/configflags/configflags.go @@ -97,8 +97,8 @@ func AddFlags(ci *fs.ConfigInfo, flagSet *pflag.FlagSet) { flags.IntVarP(flagSet, &ci.StatsFileNameLength, "stats-file-name-length", "", ci.StatsFileNameLength, "Max file name length in stats. 0 for no limit") flags.FVarP(flagSet, &ci.LogLevel, "log-level", "", "Log level DEBUG|INFO|NOTICE|ERROR") flags.FVarP(flagSet, &ci.StatsLogLevel, "stats-log-level", "", "Log level to show --stats output DEBUG|INFO|NOTICE|ERROR") - flags.FVarP(flagSet, &ci.BwLimit, "bwlimit", "", "Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.") - flags.FVarP(flagSet, &ci.BwLimitFile, "bwlimit-file", "", "Bandwidth limit per file in kBytes/s, or use suffix b|k|M|G or a full timetable.") + flags.FVarP(flagSet, &ci.BwLimit, "bwlimit", "", "Bandwidth limit in kByte/s, or use suffix b|k|M|G or a full timetable.") + flags.FVarP(flagSet, &ci.BwLimitFile, "bwlimit-file", "", "Bandwidth limit per file in kByte/s, or use suffix b|k|M|G or a full timetable.") flags.FVarP(flagSet, &ci.BufferSize, "buffer-size", "", "In memory buffer size when reading files for each --transfer.") flags.FVarP(flagSet, &ci.StreamingUploadCutoff, "streaming-upload-cutoff", "", "Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends.") flags.FVarP(flagSet, &ci.Dump, "dump", "", "List of items to dump from: "+fs.DumpFlagsList) diff --git a/fs/sync/sync_test.go b/fs/sync/sync_test.go index 823ca7759..6a8caec5f 100644 --- a/fs/sync/sync_test.go +++ b/fs/sync/sync_test.go @@ -1048,7 +1048,7 @@ func TestSyncWithMaxDuration(t *testing.T) { ci.Transfers = 1 defer accounting.TokenBucket.SetBwLimit(fs.BwPair{Tx: -1, Rx: -1}) - // 5 files of 60 bytes at 60 bytes/s 5 seconds + // 5 files of 60 bytes at 60 Byte/s 5 seconds testFiles := make([]fstest.Item, 5) for i := 0; i < len(testFiles); i++ { testFiles[i] = r.WriteFile(fmt.Sprintf("file%d", i), "------------------------------------------------------------", t1)
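The token_bucket.go hunks and the sync_test.go comment above describe the same behaviour from two sides: a limit given in Byte/s is enforced by a token bucket, which is why 5 files of 60 bytes at 60 Byte/s take roughly 5 seconds. A minimal sketch of that timing, using golang.org/x/time/rate rather than rclone's own accounting.tokenBucket (the chunk size and the copy loop are illustrative assumptions):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	const bytesPerSecond = 60 // the rate used in TestSyncWithMaxDuration
	const chunk = 10          // arbitrary transfer chunk size for the sketch

	// Token bucket refilled at 60 tokens (bytes) per second, burst of one chunk.
	limiter := rate.NewLimiter(rate.Limit(bytesPerSecond), chunk)

	start := time.Now()
	for sent := 0; sent < 5*60; sent += chunk { // 5 files of 60 bytes
		// Block until the bucket holds enough tokens for the next chunk.
		if err := limiter.WaitN(context.Background(), chunk); err != nil {
			panic(err)
		}
	}
	// 300 bytes at 60 Byte/s comes out at roughly 5 seconds.
	fmt.Printf("moved 300 bytes in %v\n", time.Since(start).Round(time.Second))
}
```

rclone's real limiter additionally keeps separate buckets for upload and download (the fs.BwPair{Tx, Rx} seen in the test) and can be toggled with SIGUSR2, as the surrounding hunks note.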