Spelling fixes

Fix spelling of: above, already, anonymous, associated,
authentication, bandwidth, because, between, blocks, calculate,
candidates, cautious, changelog, cleaner, clipboard, command,
completely, concurrently, considered, constructs, corrupt, current,
daemon, dependencies, deprecated, directory, dispatcher, download,
eligible, ellipsis, encrypter, endpoint, entrieslist, essentially,
existing writers, existing, expires, filesystem, flushing, frequently,
hierarchy, however, implementation, implements, inaccurate,
individually, insensitive, longer, maximum, metadata, modified,
multipart, namedirfirst, nextcloud, obscured, opened, optional,
owncloud, pacific, passphrase, password, permanently, persimmon,
positive, potato, protocol, quota, receiving, recommends, referring,
requires, revisited, satisfied, satisfies, satisfy, semver,
serialized, session, storage, strategies, stringlist, successful,
supported, surprise, temporarily, temporary, transactions, unneeded,
update, uploads, wrapped

Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>
Josh Soref, 2020-10-08 20:17:24 -04:00, committed by Nick Craig-Wood
parent 51a230d7fd
commit d0888edc0a
91 changed files with 161 additions and 161 deletions

@@ -86,7 +86,7 @@ git reset --soft HEAD~2 # This squashes the 2 latest commits together.
 git status # Check what will happen, if you made a mistake resetting, you can run git reset 'HEAD@{1}' to undo.
 git commit # Add a new commit message.
 git push --force # Push the squashed commit to your GitHub repo.
-# For more, see Stack Overflow, Git docs, or generally Duck around the web. jtagcat also reccommends wizardzines.com
+# For more, see Stack Overflow, Git docs, or generally Duck around the web. jtagcat also recommends wizardzines.com
 ```
 ## CI for your fork ##

@@ -1245,15 +1245,15 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 }
 blob := o.getBlobReference()
 ac := azblob.BlobAccessConditions{}
-var dowloadResponse *azblob.DownloadResponse
+var downloadResponse *azblob.DownloadResponse
 err = o.fs.pacer.Call(func() (bool, error) {
-dowloadResponse, err = blob.Download(ctx, offset, count, ac, false)
+downloadResponse, err = blob.Download(ctx, offset, count, ac, false)
 return o.fs.shouldRetry(err)
 })
 if err != nil {
 return nil, errors.Wrap(err, "failed to open for download")
 }
-in = dowloadResponse.Body(azblob.RetryReaderOptions{})
+in = downloadResponse.Body(azblob.RetryReaderOptions{})
 return in, nil
 }
@@ -1475,7 +1475,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 }
 // FIXME Until https://github.com/Azure/azure-storage-blob-go/pull/75
 // is merged the SDK can't upload a single blob of exactly the chunk
-// size, so upload with a multpart upload to work around.
+// size, so upload with a multipart upload to work around.
 // See: https://github.com/rclone/rclone/issues/2653
 multipartUpload := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
 if size == int64(o.fs.opt.ChunkSize) {

@@ -1013,7 +1013,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 return info.SharedLink.URL, err
 }
-// deletePermanently permenently deletes a trashed file
+// deletePermanently permanently deletes a trashed file
 func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
 opts := rest.Opts{
 Method: "DELETE",

@@ -1,4 +1,4 @@
-// multpart upload for box
+// multipart upload for box
 package box

@@ -147,7 +147,7 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
 // If salt is "" we use a fixed salt just to make attackers lives
 // slighty harder than using no salt.
 //
-// Note that empty passsword makes all 0x00 keys which is used in the
+// Note that empty password makes all 0x00 keys which is used in the
 // tests.
 func (c *Cipher) Key(password, salt string) (err error) {
 const keySize = len(c.dataKey) + len(c.nameKey) + len(c.nameTweak)

@@ -159,7 +159,7 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
 if strings.HasPrefix(remote, name+":") {
 return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
 }
-// Make sure to remove trailing . reffering to the current dir
+// Make sure to remove trailing . referring to the current dir
 if path.Base(rpath) == "." {
 rpath = strings.TrimSuffix(rpath, ".")
 }

@@ -87,7 +87,7 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
 }
 // wrap the object in a crypt for upload using the nonce we
-// saved from the encryptor
+// saved from the encrypter
 src := f.newObjectInfo(oi, nonce)
 // Test ObjectInfo methods

@@ -2026,10 +2026,10 @@ func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Tim
 //
 // The new object may have been created if an error is returned
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-exisitingObj, err := f.NewObject(ctx, src.Remote())
+existingObj, err := f.NewObject(ctx, src.Remote())
 switch err {
 case nil:
-return exisitingObj, exisitingObj.Update(ctx, in, src, options...)
+return existingObj, existingObj.Update(ctx, in, src, options...)
 case fs.ErrorObjectNotFound:
 // Not found so create it
 return f.PutUnchecked(ctx, in, src, options...)

@@ -1193,7 +1193,7 @@ func (o *Object) Size() int64 {
 // setMetadataFromEntry sets the fs data from a files.FileMetadata
 //
-// This isn't a complete set of metadata and has an inacurate date
+// This isn't a complete set of metadata and has an inaccurate date
 func (o *Object) setMetadataFromEntry(info *files.FileMetadata) error {
 o.id = info.Id
 o.bytes = int64(info.Size)

@@ -306,10 +306,10 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 // will return the object and the error, otherwise will return
 // nil and the error
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-exisitingObj, err := f.NewObject(ctx, src.Remote())
+existingObj, err := f.NewObject(ctx, src.Remote())
 switch err {
 case nil:
-return exisitingObj, exisitingObj.Update(ctx, in, src, options...)
+return existingObj, existingObj.Update(ctx, in, src, options...)
 case fs.ErrorObjectNotFound:
 // Not found so create it
 return f.PutUnchecked(ctx, in, src, options...)

@@ -115,7 +115,7 @@ func TestIntegration(t *testing.T) {
 assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String())
 })
-// Check it is there in the date/month/year heirachy
+// Check it is there in the date/month/year hierarchy
 // 2013-07-13 is the creation date of the folder
 checkPresent := func(t *testing.T, objPath string) {
 entries, err := f.List(ctx, objPath)

@@ -4,7 +4,7 @@ package hubic
 // This uses the normal swift mechanism to update the credentials and
 // ignores the expires field returned by the Hubic API. This may need
-// to be revisted after some actual experience.
+// to be revisited after some actual experience.
 import (
 "context"

@@ -153,9 +153,9 @@ type CustomerInfo struct {
 AccountType string `json:"account_type"`
 SubscriptionType string `json:"subscription_type"`
 Usage int64 `json:"usage"`
-Qouta int64 `json:"quota"`
+Quota int64 `json:"quota"`
 BusinessUsage int64 `json:"business_usage"`
-BusinessQouta int64 `json:"business_quota"`
+BusinessQuota int64 `json:"business_quota"`
 WriteLocked bool `json:"write_locked"`
 ReadLocked bool `json:"read_locked"`
 LockedCause interface{} `json:"locked_cause"`
@@ -386,7 +386,7 @@ type Error struct {
 Cause string `xml:"cause"`
 }
-// Error returns a string for the error and statistifes the error interface
+// Error returns a string for the error and satisfies the error interface
 func (e *Error) Error() string {
 out := fmt.Sprintf("error %d", e.StatusCode)
 if e.Message != "" {

@@ -107,7 +107,7 @@ func init() {
 }
 }
-fmt.Printf("Use legacy authentification?.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.\n")
+fmt.Printf("Use legacy authentication?.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.\n")
 if config.Confirm(false) {
 v1config(ctx, name, m)
 } else {
@@ -230,7 +230,7 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
 return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
 }
-// v1config configure a jottacloud backend using legacy authentification
+// v1config configure a jottacloud backend using legacy authentication
 func v1config(ctx context.Context, name string, m configmap.Mapper) {
 srv := rest.NewClient(fshttp.NewClient(fs.Config))
@@ -323,7 +323,7 @@ func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegis
 return deviceRegistration, err
 }
-// doAuthV1 runs the actual token request for V1 authentification
+// doAuthV1 runs the actual token request for V1 authentication
 func doAuthV1(ctx context.Context, srv *rest.Client, username, password string) (token oauth2.Token, err error) {
 // prepare out token request with username and password
 values := url.Values{}
@@ -365,7 +365,7 @@ func doAuthV1(ctx context.Context, srv *rest.Client, username, password string)
 return token, err
 }
-// v2config configure a jottacloud backend using the modern JottaCli token based authentification
+// v2config configure a jottacloud backend using the modern JottaCli token based authentication
 func v2config(ctx context.Context, name string, m configmap.Mapper) {
 srv := rest.NewClient(fshttp.NewClient(fs.Config))
@@ -405,7 +405,7 @@ func v2config(ctx context.Context, name string, m configmap.Mapper) {
 m.Set("configVersion", strconv.Itoa(configVersion))
 }
-// doAuthV2 runs the actual token request for V2 authentification
+// doAuthV2 runs the actual token request for V2 authentication
 func doAuthV2(ctx context.Context, srv *rest.Client, loginTokenBase64 string, m configmap.Mapper) (token oauth2.Token, err error) {
 loginTokenBytes, err := base64.RawURLEncoding.DecodeString(loginTokenBase64)
 if err != nil {
@@ -1512,7 +1512,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 return err
 }
-// If the file state is INCOMPLETE and CORRPUT, try to upload a then
+// If the file state is INCOMPLETE and CORRUPT, try to upload a then
 if response.State != "COMPLETED" {
 // how much do we still have to upload?
 remainingBytes := size - response.ResumePos

@@ -273,7 +273,7 @@ type Fs struct {
 root string // root path
 opt Options // parsed options
 speedupGlobs []string // list of file name patterns eligible for speedup
-speedupAny bool // true if all file names are aligible for speedup
+speedupAny bool // true if all file names are eligible for speedup
 features *fs.Features // optional features
 srv *rest.Client // REST API client
 cli *http.Client // underlying HTTP client (for authorize)
@@ -2214,7 +2214,7 @@ func (e *endHandler) handle(err error) error {
 return io.EOF
 }
-// serverPool backs server dispacher
+// serverPool backs server dispatcher
 type serverPool struct {
 pool pendingServerMap
 mu sync.Mutex

@@ -221,7 +221,7 @@ func (f *Fs) setRoot(root string) {
 f.rootBucket, f.rootDirectory = bucket.Split(f.root)
 }
-// NewFs contstructs an Fs from the path, bucket:path
+// NewFs constructs an Fs from the path, bucket:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 // Parse config into Options struct
 opt := new(Options)

@@ -254,7 +254,7 @@ type MoveItemRequest struct {
 //Always Type:view and Scope:anonymous for public sharing
 type CreateShareLinkRequest struct {
 Type string `json:"type"` //Link type in View, Edit or Embed
-Scope string `json:"scope,omitempty"` //Optional. Scope in anonymousi, organization
+Scope string `json:"scope,omitempty"` //Optional. Scope in anonymous, organization
 }
 //CreateShareLinkResponse is the response from CreateShareLinkRequest

@@ -1125,7 +1125,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 // Special treatment for a 0 length upload. This doesn't work
 // with PUT even with Content-Length set (by setting
-// opts.Body=0), so upload it as a multpart form POST with
+// opts.Body=0), so upload it as a multipart form POST with
 // Content-Length set.
 if size == 0 {
 formReader, contentType, overhead, err := rest.MultipartUpload(in, opts.Parameters, "content", leaf)

@@ -236,10 +236,10 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 // The new object may have been created if an error is returned
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
 // defer log.Trace(f, "src=%+v", src)("o=%+v, err=%v", &o, &err)
-exisitingObj, err := f.NewObject(ctx, src.Remote())
+existingObj, err := f.NewObject(ctx, src.Remote())
 switch err {
 case nil:
-return exisitingObj, exisitingObj.Update(ctx, in, src, options...)
+return existingObj, existingObj.Update(ctx, in, src, options...)
 case fs.ErrorObjectNotFound:
 // Not found so create it
 return f.PutUnchecked(ctx, in, src, options...)

@@ -115,7 +115,7 @@ func (o *Object) MimeType(ctx context.Context) string {
 // setMetadataFromEntry sets the fs data from a putio.File
 //
-// This isn't a complete set of metadata and has an inacurate date
+// This isn't a complete set of metadata and has an inaccurate date
 func (o *Object) setMetadataFromEntry(info putio.File) error {
 o.file = &info
 o.modtime = info.UpdatedAt.Time

@@ -104,7 +104,7 @@ enough memory, then increasing this will speed up the transfers.`,
 This is the number of chunks of the same file that are uploaded
 concurrently.
-NB if you set this to > 1 then the checksums of multpart uploads
+NB if you set this to > 1 then the checksums of multipart uploads
 become corrupted (the uploads themselves are not corrupted though).
 If you are uploading small numbers of large file over high speed link

@@ -2364,7 +2364,7 @@ All the objects shown will be marked for restore, then
 rclone backend restore --include "*.txt" s3:bucket/path -o priority=Standard
 It returns a list of status dictionaries with Remote and Status
-keys. The Status will be OK if it was successfull or an error message
+keys. The Status will be OK if it was successful or an error message
 if not.
 [
@@ -2529,7 +2529,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
 // listMultipartUploads lists all outstanding multipart uploads for (bucket, key)
 //
 // Note that rather lazily we treat key as a prefix so it matches
-// directories and objects. This could suprise the user if they ask
+// directories and objects. This could surprise the user if they ask
 // for "dir" and it returns "dirKey"
 func (f *Fs) listMultipartUploads(ctx context.Context, bucket, key string) (uploads []*s3.MultipartUpload, err error) {
 var (
@@ -3062,7 +3062,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 }
 // read the md5sum if available
-// - for non multpart
+// - for non multipart
 // - so we can add a ContentMD5
 // - for multipart provided checksums aren't disabled
 // - so we can add the md5sum in the metadata as metaMD5Hash

@@ -952,7 +952,7 @@ func (f *Fs) run(cmd string) ([]byte, error) {
 session, err := c.sshClient.NewSession()
 if err != nil {
-return nil, errors.Wrap(err, "run: get SFTP sessiion")
+return nil, errors.Wrap(err, "run: get SFTP session")
 }
 defer func() {
 _ = session.Close()

@@ -95,7 +95,7 @@ type UploadSpecification struct {
 ChunkURI string `json:"ChunkUri"` // Specifies the URI the client must send the file data to
 FinishURI string `json:"FinishUri"` // If provided, specifies the final call the client must perform to finish the upload process
 ProgressData string `json:"ProgressData"` // Allows the client to check progress of standard uploads
-IsResume bool `json:"IsResume"` // Specifies a Resumable upload is supproted.
+IsResume bool `json:"IsResume"` // Specifies a Resumable upload is supported.
 ResumeIndex int64 `json:"ResumeIndex"` // Specifies the initial index for resuming, if IsResume is true.
 ResumeOffset int64 `json:"ResumeOffset"` // Specifies the initial file offset by bytes, if IsResume is true
 ResumeFileHash string `json:"ResumeFileHash"` // Specifies the MD5 hash of the first ResumeOffset bytes of the partial file found at the server

@@ -1090,7 +1090,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
 } else if err != nil {
 return nil, errors.Wrap(err, "copy: failed to examine destination dir")
 } else {
-// otherwise need to copy via a temporary directlry
+// otherwise need to copy via a temporary directory
 }
 }

@@ -350,7 +350,7 @@ func (f *Fs) getAuth(req *http.Request) (err error) {
 // if have auth, check it is in date
 if f.opt.Authorization == "" || f.opt.User == "" || f.authExpiry.IsZero() || time.Until(f.authExpiry) < expiryLeeway {
 // Get the auth token
-f.srv.SetSigner(nil) // temporariliy remove the signer so we don't infinitely recurse
+f.srv.SetSigner(nil) // temporarily remove the signer so we don't infinitely recurse
 err = f.getAuthToken(ctx)
 f.srv.SetSigner(f.getAuth) // replace signer
 if err != nil {

@@ -67,12 +67,12 @@ func init() {
 log.Fatalf("Couldn't create access grant: %v", err)
 }
-serialziedAccess, err := access.Serialize()
+serializedAccess, err := access.Serialize()
 if err != nil {
 log.Fatalf("Couldn't serialize access grant: %v", err)
 }
 configMapper.Set("satellite_address", satellite)
-configMapper.Set("access_grant", serialziedAccess)
+configMapper.Set("access_grant", serializedAccess)
 } else if provider == existingProvider {
 config.FileDeleteKey(name, "satellite_address")
 config.FileDeleteKey(name, "api_key")

@@ -61,7 +61,7 @@ func (p *EpAll) Action(ctx context.Context, upstreams []*upstream.Fs, path strin
 return p.epall(ctx, upstreams, path)
 }
-// ActionEntries is ACTION category policy but receivng a set of candidate entries
+// ActionEntries is ACTION category policy but receiving a set of candidate entries
 func (p *EpAll) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
 if len(entries) == 0 {
 return nil, fs.ErrorObjectNotFound

@@ -106,7 +106,7 @@ func (p *EpMfs) Search(ctx context.Context, upstreams []*upstream.Fs, path strin
 return p.mfs(upstreams)
 }
-// SearchEntries is SEARCH category policy but receivng a set of candidate entries
+// SearchEntries is SEARCH category policy but receiving a set of candidate entries
 func (p *EpMfs) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
 if len(entries) == 0 {
 return nil, fs.ErrorObjectNotFound

@@ -14,7 +14,7 @@ func init() {
 // FF stands for first found
 // Search category: same as epff.
 // Action category: same as epff.
-// Create category: Given the order of the candiates, act on the first one found.
+// Create category: Given the order of the candidates, act on the first one found.
 type FF struct {
 EpFF
 }

@@ -60,7 +60,7 @@ func init() {
 // Options defines the configuration for this backend
 type Options struct {
 Upstreams fs.SpaceSepList `config:"upstreams"`
-Remotes fs.SpaceSepList `config:"remotes"` // Depreated
+Remotes fs.SpaceSepList `config:"remotes"` // Deprecated
 ActionPolicy string `config:"action_policy"`
 CreatePolicy string `config:"create_policy"`
 SearchPolicy string `config:"search_policy"`
@@ -567,7 +567,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-entriess := make([][]upstream.Entry, len(f.upstreams))
+entriesList := make([][]upstream.Entry, len(f.upstreams))
 errs := Errors(make([]error, len(f.upstreams)))
 multithread(len(f.upstreams), func(i int) {
 u := f.upstreams[i]
@@ -580,7 +580,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 for j, e := range entries {
 uEntries[j], _ = u.WrapEntry(e)
 }
-entriess[i] = uEntries
+entriesList[i] = uEntries
 })
 if len(errs) == len(errs.FilterNil()) {
 errs = errs.Map(func(e error) error {
@@ -594,7 +594,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 }
 return nil, errs.Err()
 }
-return f.mergeDirEntries(entriess)
+return f.mergeDirEntries(entriesList)
 }
 // ListR lists the objects and directories of the Fs starting
@@ -614,7 +614,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 // Don't implement this unless you have a more efficient way
 // of listing recursively that doing a directory traversal.
 func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
-var entriess [][]upstream.Entry
+var entriesList [][]upstream.Entry
 errs := Errors(make([]error, len(f.upstreams)))
 var mutex sync.Mutex
 multithread(len(f.upstreams), func(i int) {
@@ -626,7 +626,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 uEntries[j], _ = u.WrapEntry(e)
 }
 mutex.Lock()
-entriess = append(entriess, uEntries)
+entriesList = append(entriesList, uEntries)
 mutex.Unlock()
 return nil
 }
@@ -653,7 +653,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 }
 return errs.Err()
 }
-entries, err := f.mergeDirEntries(entriess)
+entries, err := f.mergeDirEntries(entriesList)
 if err != nil {
 return err
 }
@@ -724,9 +724,9 @@ func (f *Fs) searchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
 return f.searchPolicy.SearchEntries(entries...)
 }
-func (f *Fs) mergeDirEntries(entriess [][]upstream.Entry) (fs.DirEntries, error) {
+func (f *Fs) mergeDirEntries(entriesList [][]upstream.Entry) (fs.DirEntries, error) {
 entryMap := make(map[string]([]upstream.Entry))
-for _, en := range entriess {
+for _, en := range entriesList {
 if en == nil {
 continue
 }

@@ -52,7 +52,7 @@ type Object struct {
 f *Fs
 }
-// Entry describe a warpped fs.DirEntry interface with the
+// Entry describe a wrapped fs.DirEntry interface with the
 // information of upstream Fs
 type Entry interface {
 fs.DirEntry

@@ -12,7 +12,7 @@ import (
 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
 fstests.Run(t, &fstests.Opt{
-RemoteName: "TestWebdavNexcloud:",
+RemoteName: "TestWebdavNextcloud:",
 NilObject: (*webdav.Object)(nil),
 })
 }

@@ -20,7 +20,7 @@ type ResourceInfoRequestOptions struct {
 Fields []string
 }
-//ResourceInfoResponse struct is returned by the API for metedata requests.
+//ResourceInfoResponse struct is returned by the API for metadata requests.
 type ResourceInfoResponse struct {
 PublicKey string `json:"public_key"`
 Name string `json:"name"`

@@ -141,7 +141,7 @@ def main():
 for name in sorted(bugfixes.keys()):
 out(name)
-# Read old changlog and split
+# Read old changelog and split
 with open("docs/content/changelog.md") as fd:
 old_changelog = fd.read()
 heading = "# Changelog"

@@ -19,7 +19,7 @@ var (
 // Flags
 numberOfFiles = flag.Int("n", 1000, "Number of files to create")
 averageFilesPerDirectory = flag.Int("files-per-directory", 10, "Average number of files per directory")
-maxDepth = flag.Int("max-depth", 10, "Maximum depth of directory heirachy")
+maxDepth = flag.Int("max-depth", 10, "Maximum depth of directory hierarchy")
 minFileSize = flag.Int64("min-size", 0, "Minimum size of file to create")
 maxFileSize = flag.Int64("max-size", 100, "Maximum size of files to create")
 minFileNameLength = flag.Int("min-name-length", 4, "Minimum size of file to create")
@@ -61,7 +61,7 @@ func fileName() (name string) {
 return name
 }
-// dir is a directory in the directory heirachy being built up
+// dir is a directory in the directory hierarchy being built up
 type dir struct {
 name string
 depth int
@@ -69,7 +69,7 @@ type dir struct {
 parent *dir
 }
-// Create a random directory heirachy under d
+// Create a random directory hierarchy under d
 func (d *dir) createDirectories() {
 for totalDirectories < directoriesToCreate {
 newDir := &dir{
@@ -91,7 +91,7 @@ func (d *dir) createDirectories() {
 return
 }
-// list the directory heirachy
+// list the directory hierarchy
 func (d *dir) list(path string, output []string) []string {
 dirPath := filepath.Join(path, d.name)
 output = append(output, dirPath)

@@ -1,4 +1,4 @@
-// Package cmd implemnts the rclone command
+// Package cmd implements the rclone command
 //
 // It is in a sub package so it's internals can be re-used elsewhere
 package cmd

@@ -270,7 +270,7 @@ func (fsys *FS) Releasedir(path string, fh uint64) (errc int) {
 return fsys.closeHandle(fh)
 }
-// Statfs reads overall stats on the filessystem
+// Statfs reads overall stats on the filesystem
 func (fsys *FS) Statfs(path string, stat *fuse.Statfs_t) (errc int) {
 defer log.Trace(path, "")("stat=%+v, errc=%d", stat, &errc)
 const blockSize = 4096

@@ -111,7 +111,7 @@ whether the password is already obscured or not and put unobscured
 passwords into the config file. If you want to be 100% certain that
 the passwords get obscured then use the "--obscure" flag, or if you
 are 100% certain you are already passing obscured passwords then use
-"--no-obscure". You can also set osbscured passwords using the
+"--no-obscure". You can also set obscured passwords using the
 "rclone config password" command.
 `

@@ -49,7 +49,7 @@ func init() {
 flags.BoolVarP(cmdFlags, &checkControl, "check-control", "", true, "Check control characters.")
 flags.DurationVarP(cmdFlags, &uploadWait, "upload-wait", "", 0, "Wait after writing a file.")
 flags.BoolVarP(cmdFlags, &checkLength, "check-length", "", true, "Check max filename length.")
-flags.BoolVarP(cmdFlags, &checkStreaming, "check-streaming", "", true, "Check uploadxs with indeterminate file size.")
+flags.BoolVarP(cmdFlags, &checkStreaming, "check-streaming", "", true, "Check uploads with indeterminate file size.")
 }
 var commandDefinition = &cobra.Command{

@@ -45,7 +45,7 @@ func newFileHandle(h vfs.Handle, fsys *FS) *FileHandle {
 }
 }
-// Check interface satistfied
+// Check interface satisfied
 var _ fusefs.FileHandle = (*FileHandle)(nil)
 // The String method is for debug printing.

@@ -258,7 +258,7 @@ var _ fusefs.DirStream = (*dirStream)(nil)
 // Readdir opens a stream of directory entries.
 //
-// Readdir essentiallly returns a list of strings, and it is allowed
+// Readdir essentially returns a list of strings, and it is allowed
 // for Readdir to return different results from Lookup. For example,
 // you can return nothing for Readdir ("ls my-fuse-mount" is empty),
 // while still implementing Lookup ("ls my-fuse-mount/a-specific-file"

@@ -75,7 +75,7 @@ func helpText() (tr []string) {
 " d delete file/directory",
 }
 if !clipboard.Unsupported {
-tr = append(tr, " y copy current path to clipbard")
+tr = append(tr, " y copy current path to clipboard")
 }
 tr = append(tr, []string{
 " Y display current path",

@@ -208,7 +208,7 @@ func TestGET(t *testing.T) {
 body, err := ioutil.ReadAll(resp.Body)
 require.NoError(t, err)
-// Check we got a Last-Modifed header and that it is a valid date
+// Check we got a Last-Modified header and that it is a valid date
 if test.Status == http.StatusOK || test.Status == http.StatusPartialContent {
 lastModified := resp.Header.Get("Last-Modified")
 assert.NotEqual(t, "", lastModified, test.Golden)

@@ -61,7 +61,7 @@ to be used within the template to server pages:
 | .Name | The full path of a file/directory. |
 | .Title | Directory listing of .Name |
 | .Sort | The current sort used. This is changeable via ?sort= parameter |
-| | Sort Options: namedirfist,name,size,time (default namedirfirst) |
+| | Sort Options: namedirfirst,name,size,time (default namedirfirst) |
 | .Order | The current ordering used. This is changeable via ?order= parameter |
 | | Order Options: asc,desc (default asc) |
 | .Query | Currently unused. |

@@ -132,7 +132,7 @@ func Error(what interface{}, w http.ResponseWriter, text string, err error) {
 }
 }
-// ProcessQueryParams takes and sorts/orders based on the request sort/order parameters and default is namedirfist/asc
+// ProcessQueryParams takes and sorts/orders based on the request sort/order parameters and default is namedirfirst/asc
 func (d *Directory) ProcessQueryParams(sortParm string, orderParm string) *Directory {
 d.Sort = sortParm
 d.Order = orderParm

@@ -27,7 +27,7 @@ var Help = strings.Replace(`
 If you supply the parameter |--auth-proxy /path/to/program| then
 rclone will use that program to generate backends on the fly which
 then are used to authenticate incoming requests. This uses a simple
-JSON based protocl with input on STDIN and output on STDOUT.
+JSON based protocol with input on STDIN and output on STDOUT.
 **PLEASE NOTE:** |--auth-proxy| and |--authorized-keys| cannot be used
 together, if |--auth-proxy| is set the authorized keys option will be

@@ -64,7 +64,7 @@ func TestTouchWithTimestamp(t *testing.T) {
 checkFile(t, r.Fremote, srcFileName, "")
 }
-func TestTouchWithLognerTimestamp(t *testing.T) {
+func TestTouchWithLongerTimestamp(t *testing.T) {
 r := fstest.NewRun(t)
 defer r.Finalise()

@@ -63,7 +63,7 @@ description: "Rclone Changelog"
 * Add reverse proxy pluginsHandler for serving plugins (Chaitanya Bankanhal)
 * Add `mount/listmounts` option for listing current mounts (Chaitanya Bankanhal)
 * Add `operations/uploadfile` to upload a file through rc using encoding multipart/form-data (Chaitanya Bankanhal)
-* Add `core/copmmand` to execute rclone terminal commands. (Chaitanya Bankanhal)
+* Add `core/command` to execute rclone terminal commands. (Chaitanya Bankanhal)
 * `rclone check`
 * Add reporting of filenames for same/missing/changed (Nick Craig-Wood)
 * Make check command obey `--dry-run`/`-i`/`--interactive` (Nick Craig-Wood)
@@ -172,7 +172,7 @@ description: "Rclone Changelog"
 * Google Cloud Storage
 * Add support for anonymous access (Kai Lüke)
 * Jottacloud
-* Bring back legacy authentification for use with whitelabel versions (buengese)
+* Bring back legacy authentication for use with whitelabel versions (buengese)
 * Switch to new api root - also implement a very ugly workaround for the DirMove failures (buengese)
 * Onedrive
 * Rework cancel of multipart uploads on rclone exit (Nick Craig-Wood)
@@ -322,7 +322,7 @@ all the docs and Edward Barker for helping re-write the front page.
 * Add `--header` flag to add HTTP headers to every HTTP transaction (Nick Craig-Wood)
 * Add `--check-first` to do all checking before starting transfers (Nick Craig-Wood)
 * Add `--track-renames-strategy` for configurable matching criteria for `--track-renames` (Bernd Schoolmann)
-* Add `--cutoff-mode` hard,soft,catious (Shing Kit Chan & Franklyn Tackitt)
+* Add `--cutoff-mode` hard,soft,cautious (Shing Kit Chan & Franklyn Tackitt)
 * Filter flags (eg `--files-from -`) can read from stdin (fishbullet)
 * Add `--error-on-no-transfer` option (Jon Fautley)
 * Implement `--order-by xxx,mixed` for copying some small and some big files (Nick Craig-Wood)
@@ -605,7 +605,7 @@ all the docs and Edward Barker for helping re-write the front page.
 * dbhashsum: Stop it returning UNSUPPORTED on dropbox (Nick Craig-Wood)
 * dedupe: Add missing modes to help string (Nick Craig-Wood)
 * operations
-* Fix dedupe continuing on errors like insufficientFilePermisson (SezalAgrawal)
+* Fix dedupe continuing on errors like insufficientFilePersimmon (SezalAgrawal)
 * Clear accounting before low level retry (Maciej Zimnoch)
 * Write debug message when hashes could not be checked (Ole Schütt)
 * Move interface assertion to tests to remove pflag dependency (Nick Craig-Wood)
@@ -669,7 +669,7 @@ all the docs and Edward Barker for helping re-write the front page.
 * S3
 * Re-implement multipart upload to fix memory issues (Nick Craig-Wood)
 * Add `--s3-copy-cutoff` for size to switch to multipart copy (Nick Craig-Wood)
-* Add new region Asia Patific (Hong Kong) (Outvi V)
+* Add new region Asia Pacific (Hong Kong) (Outvi V)
 * Reduce memory usage streaming files by reducing max stream upload size (Nick Craig-Wood)
 * Add `--s3-list-chunk` option for bucket listing (Thomas Kriechbaumer)
 * Force path style bucket access to off for AWS deprecation (Nick Craig-Wood)
@@ -930,7 +930,7 @@ all the docs and Edward Barker for helping re-write the front page.
 * rcat: Fix slowdown on systems with multiple hashes (Nick Craig-Wood)
 * rcd: Fix permissions problems on cache directory with web gui download (Nick Craig-Wood)
 * Mount
-* Default `--deamon-timout` to 15 minutes on macOS and FreeBSD (Nick Craig-Wood)
+* Default `--daemon-timout` to 15 minutes on macOS and FreeBSD (Nick Craig-Wood)
 * Update docs to show mounting from root OK for bucket based (Nick Craig-Wood)
 * Remove nonseekable flag from write files (Nick Craig-Wood)
 * VFS
@@ -1093,7 +1093,7 @@ all the docs and Edward Barker for helping re-write the front page.
 * Add client side TLS/SSL flags `--ca-cert`/`--client-cert`/`--client-key` (Nick Craig-Wood)
 * Implement `--suffix-keep-extension` for use with `--suffix` (Nick Craig-Wood)
 * build:
-* Switch to semvar compliant version tags to be go modules compliant (Nick Craig-Wood)
+* Switch to semver compliant version tags to be go modules compliant (Nick Craig-Wood)
 * Update to use go1.12.x for the build (Nick Craig-Wood)
 * serve dlna: Add connection manager service description to improve compatibility (Dan Walters)
 * lsf: Add 'e' format to show encrypted names and 'o' for original IDs (Nick Craig-Wood)

@@ -205,7 +205,7 @@ or the latest version (equivalent to the beta) with
 These will build the binary in `$(go env GOPATH)/bin`
 (`~/go/bin/rclone` by default) after downloading the source to the go
 module cache. Note - do **not** use the `-u` flag here. This causes go
-to try to update the depencencies that rclone uses and sometimes these
+to try to update the dependencies that rclone uses and sometimes these
 don't work with the current version of rclone.
 ## Installation with Ansible ##

@@ -27,8 +27,8 @@ Note that the web interface may refer to this token as a JottaCli token.
 ### Legacy Setup
 If you are using one of the whitelabel versions (Elgiganten, Com Hem Cloud) you may not have the option
-to generate a CLI token. In this case you'll have to use the legacy authentification. To to this select
-yes when the setup asks for legacy authentification and enter your username and password.
+to generate a CLI token. In this case you'll have to use the legacy authentication. To to this select
+yes when the setup asks for legacy authentication and enter your username and password.
 The rest of the setup is identical to the default setup.
 Here is an example of how to make a remote called `remote` with the default setup. First run:
@@ -59,7 +59,7 @@ y) Yes
 n) No
 y/n> n
 Remote config
-Use legacy authentification?.
+Use legacy authentication?.
 This is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.
 y) Yes
 n) No (default)

@@ -402,7 +402,7 @@ If the server can't do `CleanUp` then `rclone cleanup` will return an
 error.
 ‡‡ Note that while Box implements this it has to delete every file
-idividually so it will be slower than emptying the trash via the WebUI
+individually so it will be slower than emptying the trash via the WebUI
 ### ListR ###

@@ -259,7 +259,7 @@ Concurrency for multipart uploads.
 This is the number of chunks of the same file that are uploaded
 concurrently.
-NB if you set this to > 1 then the checksums of multpart uploads
+NB if you set this to > 1 then the checksums of multipart uploads
 become corrupted (the uploads themselves are not corrupted though).
 If you are uploading small numbers of large file over high speed link

@@ -582,7 +582,7 @@ Region to connect to.
 - Asia Pacific (Mumbai)
 - Needs location constraint ap-south-1.
 - "ap-east-1"
-- Asia Patific (Hong Kong) Region
+- Asia Pacific (Hong Kong) Region
 - Needs location constraint ap-east-1.
 - "sa-east-1"
 - South America (Sao Paulo) Region
@@ -1493,7 +1493,7 @@ All the objects shown will be marked for restore, then
 rclone backend restore --include "*.txt" s3:bucket/path -o priority=Standard
 It returns a list of status dictionaries with Remote and Status
-keys. The Status will be OK if it was successfull or an error message
+keys. The Status will be OK if it was successful or an error message
 if not.
 [
@@ -1794,7 +1794,7 @@ Choose a number from below, or type in your own value
 secret_access_key> <>
 ```
-6. Specify the endpoint for IBM COS. For Public IBM COS, choose from the option below. For On Premise IBM COS, enter an enpoint address.
+6. Specify the endpoint for IBM COS. For Public IBM COS, choose from the option below. For On Premise IBM COS, enter an endpoint address.
 ```
 Endpoint for IBM COS S3 API.
 Specify if using an IBM COS On Premise.
@@ -1855,7 +1855,7 @@ Choose a number from below, or type in your own value
 location_constraint>1
 ```
-9. Specify a canned ACL. IBM Cloud (Strorage) supports "public-read" and "private". IBM Cloud(Infra) supports all the canned ACLs. On-Premise COS supports all the canned ACLs.
+9. Specify a canned ACL. IBM Cloud (Storage) supports "public-read" and "private". IBM Cloud(Infra) supports all the canned ACLs. On-Premise COS supports all the canned ACLs.
 ```
 Canned ACL used when creating buckets and/or storing objects in S3.
 For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl

@@ -65,7 +65,7 @@ d) Delete this remote
 y/e/d> y
 ```
-### Setup with API key and passhprase
+### Setup with API key and passphrase
 ```
 No remotes found - make a new one

@@ -450,7 +450,7 @@ func shortenName(in string, size int) string {
 return in
 }
 name := []rune(in)
-size-- // don't count elipsis rune
+size-- // don't count ellipsis rune
 suffixLength := size / 2
 prefixLength := size - suffixLength
 suffixStart := len(name) - suffixLength

@@ -99,7 +99,7 @@ func StartTokenTicker() {
 }()
 }
-// limitBandwith sleeps for the correct amount of time for the passage
+// limitBandwidth sleeps for the correct amount of time for the passage
 // of n bytes according to the current bandwidth limit
 func limitBandwidth(n int) {
 tokenBucketMu.Lock()

@@ -91,7 +91,7 @@ func (tm *transferMap) _sortedSlice() []*Transfer {
 func (tm *transferMap) String(progress *inProgress, exclude *transferMap) string {
 tm.mu.RLock()
 defer tm.mu.RUnlock()
-strngs := make([]string, 0, len(tm.items))
+stringList := make([]string, 0, len(tm.items))
 for _, tr := range tm._sortedSlice() {
 if exclude != nil {
 exclude.mu.RLock()
@@ -111,9 +111,9 @@ func (tm *transferMap) String(progress *inProgress, exclude *transferMap) string
 tm.name,
 )
 }
-strngs = append(strngs, " * "+out)
+stringList = append(stringList, " * "+out)
 }
-return strings.Join(strngs, "\n")
+return strings.Join(stringList, "\n")
 }
 // progress returns total bytes read as well as the size.
@ -80,7 +80,7 @@ func parseWeekday(dayOfWeek string) (int, error) {
// Set the bandwidth timetable. // Set the bandwidth timetable.
func (x *BwTimetable) Set(s string) error { func (x *BwTimetable) Set(s string) error {
// The timetable is formatted as: // The timetable is formatted as:
// "dayOfWeek-hh:mm,bandwidth dayOfWeek-hh:mm,banwidth..." ex: "Mon-10:00,10G Mon-11:30,1G Tue-18:00,off" // "dayOfWeek-hh:mm,bandwidth dayOfWeek-hh:mm,bandwidth..." ex: "Mon-10:00,10G Mon-11:30,1G Tue-18:00,off"
// If only a single bandwidth identifier is provided, we assume constant bandwidth. // If only a single bandwidth identifier is provided, we assume constant bandwidth.
if len(s) == 0 { if len(s) == 0 {
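The timetable format described in the comment above is easiest to see with a small example. The sketch below is hypothetical and not rclone's actual parser (which also validates day names, times and bandwidth suffixes, and handles a bare bandwidth value): it only splits entries of the full `dayOfWeek-hh:mm,bandwidth` form.
```
package main

import (
	"fmt"
	"strings"
)

// parseTimetable is an illustrative sketch: it splits a timetable string of
// the form "dayOfWeek-hh:mm,bandwidth ..." into (day, time, bandwidth)
// triples. Validation of the individual parts is omitted.
func parseTimetable(s string) ([][3]string, error) {
	var entries [][3]string
	for _, tok := range strings.Fields(s) {
		dash := strings.Index(tok, "-")
		comma := strings.Index(tok, ",")
		if dash < 0 || comma < dash {
			return nil, fmt.Errorf("bad timetable entry %q", tok)
		}
		entries = append(entries, [3]string{tok[:dash], tok[dash+1 : comma], tok[comma+1:]})
	}
	return entries, nil
}

func main() {
	entries, err := parseTimetable("Mon-10:00,10G Mon-11:30,1G Tue-18:00,off")
	if err != nil {
		panic(err)
	}
	fmt.Println(entries) // [[Mon 10:00 10G] [Mon 11:30 1G] [Tue 18:00 off]]
}
```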
@ -38,7 +38,7 @@ type ChunkedReader struct {
// //
// An initialChunkSize of <= 0 will disable chunked reading. // An initialChunkSize of <= 0 will disable chunked reading.
// If maxChunkSize is greater than initialChunkSize, the chunk size will be // If maxChunkSize is greater than initialChunkSize, the chunk size will be
// doubled after each chunk read with a maximun of maxChunkSize. // doubled after each chunk read with a maximum of maxChunkSize.
// A Seek or RangeSeek will reset the chunk size to it's initial value // A Seek or RangeSeek will reset the chunk size to it's initial value
func New(ctx context.Context, o fs.Object, initialChunkSize int64, maxChunkSize int64) *ChunkedReader { func New(ctx context.Context, o fs.Object, initialChunkSize int64, maxChunkSize int64) *ChunkedReader {
if initialChunkSize <= 0 { if initialChunkSize <= 0 {
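The doubling behaviour documented above can be shown in isolation. This is only an illustrative sketch of the growth rule, not the ChunkedReader itself; the function name `nextChunkSize` is invented for the example.
```
package main

import "fmt"

// nextChunkSize doubles the current chunk size, capping it at max
// (a max of <= 0 means no cap), mirroring the rule described above.
func nextChunkSize(current, max int64) int64 {
	next := current * 2
	if max > 0 && next > max {
		next = max
	}
	return next
}

func main() {
	size := int64(128 * 1024) // initial chunk size: 128 KiB
	max := int64(1024 * 1024) // maximum chunk size: 1 MiB
	for i := 0; i < 5; i++ {
		fmt.Println(size)
		size = nextChunkSize(size, max)
	}
	// Prints 131072, 262144, 524288, 1048576, 1048576
}
```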
@ -62,7 +62,7 @@ type ConfigInfo struct {
DeleteMode DeleteMode DeleteMode DeleteMode
MaxDelete int64 MaxDelete int64
TrackRenames bool // Track file renames. TrackRenames bool // Track file renames.
TrackRenamesStrategy string // Comma separated list of stratgies used to track renames TrackRenamesStrategy string // Comma separated list of strategies used to track renames
LowLevelRetries int LowLevelRetries int
UpdateOlder bool // Skip files that are newer on the destination UpdateOlder bool // Skip files that are newer on the destination
NoGzip bool // Disable compression NoGzip bool // Disable compression
@ -177,7 +177,7 @@ func TestNewRemoteName(t *testing.T) {
assert.Equal(t, "newname", NewRemoteName()) assert.Equal(t, "newname", NewRemoteName())
} }
func TestCreateUpatePasswordRemote(t *testing.T) { func TestCreateUpdatePasswordRemote(t *testing.T) {
defer testConfigFile(t, "update.conf")() defer testConfigFile(t, "update.conf")()
for _, doObscure := range []bool{false, true} { for _, doObscure := range []bool{false, true} {
@ -73,7 +73,7 @@ func (mc *multiThreadCopyState) copyStream(ctx context.Context, stream int) (err
rc, err := NewReOpen(ctx, mc.src, fs.Config.LowLevelRetries, &fs.RangeOption{Start: start, End: end - 1}) rc, err := NewReOpen(ctx, mc.src, fs.Config.LowLevelRetries, &fs.RangeOption{Start: start, End: end - 1})
if err != nil { if err != nil {
return errors.Wrap(err, "multpart copy: failed to open source") return errors.Wrap(err, "multipart copy: failed to open source")
} }
defer fs.CheckClose(rc, &err) defer fs.CheckClose(rc, &err)
@ -89,29 +89,29 @@ func (mc *multiThreadCopyState) copyStream(ctx context.Context, stream int) (err
if nr > 0 { if nr > 0 {
err = mc.acc.AccountRead(nr) err = mc.acc.AccountRead(nr)
if err != nil { if err != nil {
return errors.Wrap(err, "multpart copy: accounting failed") return errors.Wrap(err, "multipart copy: accounting failed")
} }
nw, ew := mc.wc.WriteAt(buf[0:nr], offset) nw, ew := mc.wc.WriteAt(buf[0:nr], offset)
if nw > 0 { if nw > 0 {
offset += int64(nw) offset += int64(nw)
} }
if ew != nil { if ew != nil {
return errors.Wrap(ew, "multpart copy: write failed") return errors.Wrap(ew, "multipart copy: write failed")
} }
if nr != nw { if nr != nw {
return errors.Wrap(io.ErrShortWrite, "multpart copy") return errors.Wrap(io.ErrShortWrite, "multipart copy")
} }
} }
if er != nil { if er != nil {
if er != io.EOF { if er != io.EOF {
return errors.Wrap(er, "multpart copy: read failed") return errors.Wrap(er, "multipart copy: read failed")
} }
break break
} }
} }
if offset != end { if offset != end {
return errors.Errorf("multpart copy: wrote %d bytes but expected to write %d", offset-start, end-start) return errors.Errorf("multipart copy: wrote %d bytes but expected to write %d", offset-start, end-start)
} }
fs.Debugf(mc.src, "multi-thread copy: stream %d/%d (%d-%d) size %v finished", stream+1, mc.streams, start, end, fs.SizeSuffix(end-start)) fs.Debugf(mc.src, "multi-thread copy: stream %d/%d (%d-%d) size %v finished", stream+1, mc.streams, start, end, fs.SizeSuffix(end-start))
@ -163,7 +163,7 @@ func multiThreadCopy(ctx context.Context, f fs.Fs, remote string, src fs.Object,
// create write file handle // create write file handle
mc.wc, err = openWriterAt(gCtx, remote, mc.size) mc.wc, err = openWriterAt(gCtx, remote, mc.size)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "multpart copy: failed to open destination") return nil, errors.Wrap(err, "multipart copy: failed to open destination")
} }
fs.Debugf(src, "Starting multi-thread copy with %d parts of size %v", mc.streams, fs.SizeSuffix(mc.partSize)) fs.Debugf(src, "Starting multi-thread copy with %d parts of size %v", mc.streams, fs.SizeSuffix(mc.partSize))
@ -32,7 +32,7 @@ type pipe struct {
func newPipe(orderBy string, stats func(items int, totalSize int64), maxBacklog int) (*pipe, error) { func newPipe(orderBy string, stats func(items int, totalSize int64), maxBacklog int) (*pipe, error) {
if maxBacklog < 0 { if maxBacklog < 0 {
maxBacklog = (1 << (bits.UintSize - 1)) - 1 // largest posititive int maxBacklog = (1 << (bits.UintSize - 1)) - 1 // largest positive int
} }
less, fraction, err := newLess(orderBy) less, fraction, err := newLess(orderBy)
if err != nil { if err != nil {
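The `(1 << (bits.UintSize - 1)) - 1` expression above is the usual way to spell the largest positive int for the current platform. A quick standalone check (using `math.MaxInt`, which needs Go 1.17 or later):
```
package main

import (
	"fmt"
	"math"
	"math/bits"
)

func main() {
	largest := (1 << (bits.UintSize - 1)) - 1 // largest positive int on this platform
	fmt.Println(largest)                // 9223372036854775807 on a 64-bit platform
	fmt.Println(largest == math.MaxInt) // true
}
```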
@ -40,7 +40,7 @@ type syncCopyMove struct {
deletersWg sync.WaitGroup // for delete before go routine deletersWg sync.WaitGroup // for delete before go routine
deleteFilesCh chan fs.Object // channel to receive deletes if delete before deleteFilesCh chan fs.Object // channel to receive deletes if delete before
trackRenames bool // set if we should do server side renames trackRenames bool // set if we should do server side renames
trackRenamesStrategy trackRenamesStrategy // stratgies used for tracking renames trackRenamesStrategy trackRenamesStrategy // strategies used for tracking renames
dstFilesMu sync.Mutex // protect dstFiles dstFilesMu sync.Mutex // protect dstFiles
dstFiles map[string]fs.Object // dst files, always filled dstFiles map[string]fs.Object // dst files, always filled
srcFiles map[string]fs.Object // src files, only used if deleteBefore srcFiles map[string]fs.Object // src files, only used if deleteBefore
@ -205,7 +205,7 @@ func (dm *dirMap) add(dir string, sent bool) {
if !sent { if !sent {
return return
} }
// currenSent == false && sent == true so needs overriding // currentSent == false && sent == true so needs overriding
} }
dm.m[dir] = sent dm.m[dir] = sent
// Add parents in as unsent // Add parents in as unsent
@ -254,7 +254,7 @@ func (r *Run) WriteObjectTo(ctx context.Context, f fs.Fs, remote, content string
} }
r.Mkdir(ctx, f) r.Mkdir(ctx, f)
// caclulate all hashes f supports for content // calculate all hashes f supports for content
hash, err := hash.NewMultiHasherTypes(f.Hashes()) hash, err := hash.NewMultiHasherTypes(f.Hashes())
if err != nil { if err != nil {
r.Fatalf("Failed to make new multi hasher: %v", err) r.Fatalf("Failed to make new multi hasher: %v", err)
@ -54,7 +54,7 @@ func (b *Backend) includeTest(t *Test) bool {
// MakeRuns creates Run objects the Backend and Test // MakeRuns creates Run objects the Backend and Test
// //
// There can be several created, one for each combination of optionl // There can be several created, one for each combination of optional
// flags (eg FastList) // flags (eg FastList)
func (b *Backend) MakeRuns(t *Test) (runs []*Run) { func (b *Backend) MakeRuns(t *Test) (runs []*Run) {
if !b.includeTest(t) { if !b.includeTest(t) {

docker run --rm -d --name $NAME \ docker run --rm -d --name $NAME \
-e "OWNCLOUD_DOMAIN=${OWNCLOUD_DOMAIN}" \ -e "OWNCLOUD_DOMAIN=${OWNCLOUD_DOMAIN}" \
-e "OWNCLOUD_DB_TYPE=sqlite" \ -e "OWNCLOUD_DB_TYPE=sqlite" \
-e "OWNCLOUD_DB_NAME=oowncloud.db" \ -e "OWNCLOUD_DB_NAME=owncloud.db" \
-e "OWNCLOUD_ADMIN_USERNAME=$USER" \ -e "OWNCLOUD_ADMIN_USERNAME=$USER" \
-e "OWNCLOUD_ADMIN_PASSWORD=$PASS" \ -e "OWNCLOUD_ADMIN_PASSWORD=$PASS" \
-e "OWNCLOUD_MYSQL_UTF8MB4=true" \ -e "OWNCLOUD_MYSQL_UTF8MB4=true" \
@ -195,7 +195,7 @@ func (mask *MultiEncoder) Set(in string) error {
return nil return nil
} }
// Type returns a textual type of the MultiEncoder to satsify the pflag.Value interface // Type returns a textual type of the MultiEncoder to satisfy the pflag.Value interface
func (mask MultiEncoder) Type() string { func (mask MultiEncoder) Type() string {
return "Encoding" return "Encoding"
} }
@ -32,7 +32,7 @@ func String(n int) string {
// memorable. The password is composed of printable ASCII characters // memorable. The password is composed of printable ASCII characters
// from the base64 alphabet. // from the base64 alphabet.
// //
// Requres password strength in bits. // Requires password strength in bits.
// 64 is just about memorable // 64 is just about memorable
// 128 is secure // 128 is secure
func Password(bits int) (password string, err error) { func Password(bits int) (password string, err error) {
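The comment above measures password strength in bits. The snippet below is a rough, hypothetical sketch of the idea, not rclone's implementation: it reads enough random bytes to cover the requested bits of entropy and base64-encodes them so the result is printable ASCII.
```
package main

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
)

// password is an illustrative sketch: bits of entropy are rounded up to
// whole random bytes, which are then base64 encoded.
func password(bits int) (string, error) {
	n := (bits + 7) / 8
	buf := make([]byte, n)
	if _, err := rand.Read(buf); err != nil {
		return "", err
	}
	return base64.RawURLEncoding.EncodeToString(buf), nil
}

func main() {
	pw, err := password(64) // 64 bits: just about memorable
	if err != nil {
		panic(err)
	}
	fmt.Println(pw)
}
```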
@ -102,7 +102,7 @@ func (rs *Ranges) coalesce(i int) {
endChop = j + 1 endChop = j + 1
} }
if endChop > startChop { if endChop > startChop {
// chop the uneeded ranges out // chop the unneeded ranges out
copy(ranges[startChop:], ranges[endChop:]) copy(ranges[startChop:], ranges[endChop:])
*rs = ranges[:len(ranges)-endChop+startChop] *rs = ranges[:len(ranges)-endChop+startChop]
} }
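The `copy` and re-slice pattern used above is the standard in-place way to cut a span out of a Go slice. A minimal standalone version of the same pattern:
```
package main

import "fmt"

// cut removes s[start:end] in place and returns the shortened slice, using
// the same copy-and-reslice pattern as the coalesce code above.
func cut(s []int, start, end int) []int {
	copy(s[start:], s[end:])
	return s[:len(s)-(end-start)]
}

func main() {
	s := []int{1, 2, 3, 4, 5, 6}
	fmt.Println(cut(s, 2, 4)) // [1 2 5 6]
}
```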
@ -15,7 +15,7 @@ import (
// and b will be set. // and b will be set.
// //
// This is useful for copying between almost identical structures that // This is useful for copying between almost identical structures that
// are requently present in auto generated code for cloud storage // are frequently present in auto generated code for cloud storage
// interfaces. // interfaces.
func SetFrom(a, b interface{}) { func SetFrom(a, b interface{}) {
ta := reflect.TypeOf(a).Elem() ta := reflect.TypeOf(a).Elem()
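The behaviour described above, copying identically named and typed fields between almost identical structs, can be sketched with the reflect package. This is a simplified, hypothetical version, not the package's SetFrom; the struct and field names below merely echo the test in the next hunk.
```
package main

import (
	"fmt"
	"reflect"
)

// setFrom copies each exported field of *b into *a when a field with the
// same name and type exists in both structs. Simplified sketch only; no
// error handling.
func setFrom(a, b interface{}) {
	va := reflect.ValueOf(a).Elem()
	vb := reflect.ValueOf(b).Elem()
	for i := 0; i < va.NumField(); i++ {
		name := va.Type().Field(i).Name
		fb := vb.FieldByName(name)
		if fb.IsValid() && fb.Type() == va.Field(i).Type() {
			va.Field(i).Set(fb)
		}
	}
}

type aType struct {
	Matching string
	OnlyA    string
}

type bType struct {
	Matching string
	OnlyB    string
}

func main() {
	a := aType{Matching: "a", OnlyA: "onlyA"}
	b := bType{Matching: "b", OnlyB: "onlyB"}
	setFrom(&a, &b)
	fmt.Printf("%+v\n", a) // {Matching:b OnlyA:onlyA}
}
```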
@ -57,7 +57,7 @@ func TestSetFrom(t *testing.T) {
Matching: "a", Matching: "a",
OnlyA: "onlyA", OnlyA: "onlyA",
MatchingInt: 1, MatchingInt: 1,
DifferentType: "suprise", DifferentType: "surprise",
} }
b := bType{ b := bType{
@ -75,7 +75,7 @@ func TestSetFrom(t *testing.T) {
Matching: "b", Matching: "b",
OnlyA: "onlyA", OnlyA: "onlyA",
MatchingInt: 2, MatchingInt: 2,
DifferentType: "suprise", DifferentType: "surprise",
}, a) }, a)
assert.Equal(t, bBefore, b) assert.Equal(t, bBefore, b)
@ -86,7 +86,7 @@ func TestSetFromReversed(t *testing.T) {
Matching: "a", Matching: "a",
OnlyA: "onlyA", OnlyA: "onlyA",
MatchingInt: 1, MatchingInt: 1,
DifferentType: "suprise", DifferentType: "surprise",
} }
aBefore := a aBefore := a
@ -12,7 +12,7 @@ Todo
* Might be quicker to check md5sums first? for swift <-> swift certainly, and maybe for small files * Might be quicker to check md5sums first? for swift <-> swift certainly, and maybe for small files
* FIXME Make NewFs to return err.IsAnObject so can put the LimitedFs * FIXME Make NewFs to return err.IsAnObject so can put the LimitedFs
creation in common code? Or try for as much as possible? creation in common code? Or try for as much as possible?
* FIXME Account all the transactons (ls etc) using a different * FIXME Account all the transactions (ls etc) using a different
Roundtripper wrapper which wraps the transactions? Roundtripper wrapper which wraps the transactions?
Ideas Ideas
@ -121,7 +121,7 @@ func (d *Dir) Inode() uint64 {
return d.inode return d.inode
} }
// Node returns the Node assocuated with this - satisfies Noder interface // Node returns the Node associated with this - satisfies Noder interface
func (d *Dir) Node() Node { func (d *Dir) Node() Node {
return d return d
} }
@ -254,7 +254,7 @@ func (d *Dir) countActiveWriters() (writers int) {
} }
// age returns the duration since the last time the directory contents // age returns the duration since the last time the directory contents
// was read and the content is cosidered stale. age will be 0 and // was read and the content is considered stale. age will be 0 and
// stale true if the last read time is empty. // stale true if the last read time is empty.
// age must be called with d.mu held. // age must be called with d.mu held.
func (d *Dir) _age(when time.Time) (age time.Duration, stale bool) { func (d *Dir) _age(when time.Time) (age time.Duration, stale bool) {
@ -528,7 +528,7 @@ func (d *Dir) stat(leaf string) (Node, error) {
// duplicate case insensitive match is an error // duplicate case insensitive match is an error
return nil, errors.Errorf("duplicate filename %q detected with --vfs-case-insensitive set", leaf) return nil, errors.Errorf("duplicate filename %q detected with --vfs-case-insensitive set", leaf)
} }
// found a case insenstive match // found a case insensitive match
ok = true ok = true
item = node item = node
} }
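The duplicate detection in the hunk above is part of a case-insensitive lookup. The sketch below is an invented, simplified illustration of that idea (a flat name list rather than the VFS directory structures):
```
package main

import (
	"fmt"
	"strings"
)

// findInsensitive returns the single case-insensitive match for leaf, or an
// error if the match is ambiguous or missing. Illustrative only.
func findInsensitive(names []string, leaf string) (string, error) {
	found := ""
	for _, name := range names {
		if strings.EqualFold(name, leaf) {
			if found != "" {
				return "", fmt.Errorf("duplicate filename %q detected", leaf)
			}
			found = name
		}
	}
	if found == "" {
		return "", fmt.Errorf("%q not found", leaf)
	}
	return found, nil
}

func main() {
	names := []string{"Readme.md", "photo.JPG"}
	match, err := findInsensitive(names, "readme.MD")
	fmt.Println(match, err) // Readme.md <nil>
}
```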
@ -35,7 +35,7 @@ func (fh *DirHandle) Stat() (fi os.FileInfo, err error) {
return fh.d, nil return fh.d, nil
} }
// Node returns the Node assocuated with this - satisfies Noder interface // Node returns the Node associated with this - satisfies Noder interface
func (fh *DirHandle) Node() Node { func (fh *DirHandle) Node() Node {
return fh.d return fh.d
} }
@ -135,7 +135,7 @@ func (f *File) Inode() uint64 {
return f.inode return f.inode
} }
// Node returns the Node assocuated with this - satisfies Noder interface // Node returns the Node associated with this - satisfies Noder interface
func (f *File) Node() Node { func (f *File) Node() Node {
return f return f
} }
@ -499,7 +499,7 @@ func (f *File) openWrite(flags int) (fh *WriteFileHandle, err error) {
return fh, nil return fh, nil
} }
// openRW open the file for read and write using a temporay file // openRW open the file for read and write using a temporary file
// //
// It uses the open flags passed in. // It uses the open flags passed in.
func (f *File) openRW(flags int) (fh *RWFileHandle, err error) { func (f *File) openRW(flags int) (fh *RWFileHandle, err error) {
@ -611,7 +611,7 @@ func (f *File) Fs() fs.Fs {
// O_CREATE create a new file if none exists. // O_CREATE create a new file if none exists.
// O_EXCL used with O_CREATE, file must not exist // O_EXCL used with O_CREATE, file must not exist
// O_SYNC open for synchronous I/O. // O_SYNC open for synchronous I/O.
// O_TRUNC if possible, truncate file when opene // O_TRUNC if possible, truncate file when opened
// //
// We ignore O_SYNC and O_EXCL // We ignore O_SYNC and O_EXCL
func (f *File) Open(flags int) (fd Handle, err error) { func (f *File) Open(flags int) (fd Handle, err error) {
@ -149,7 +149,7 @@ In this mode all reads and writes are buffered to and from disk. When
data is read from the remote this is buffered to disk as well. data is read from the remote this is buffered to disk as well.
In this mode the files in the cache will be sparse files and rclone In this mode the files in the cache will be sparse files and rclone
will keep track of which bits of the files it has dowloaded. will keep track of which bits of the files it has downloaded.
So if an application only reads the starts of each file, then rclone So if an application only reads the starts of each file, then rclone
will only buffer the start of the file. These files will appear to be will only buffer the start of the file. These files will appear to be
@ -98,7 +98,7 @@ func (fh *ReadFileHandle) String() string {
return fh.file.String() + " (r)" return fh.file.String() + " (r)"
} }
// Node returns the Node assocuated with this - satisfies Noder interface // Node returns the Node associated with this - satisfies Noder interface
func (fh *ReadFileHandle) Node() Node { func (fh *ReadFileHandle) Node() Node {
fh.mu.Lock() fh.mu.Lock()
defer fh.mu.Unlock() defer fh.mu.Unlock()
@ -118,7 +118,7 @@ func (fh *RWFileHandle) String() string {
return fh.file.String() + " (rw)" return fh.file.String() + " (rw)"
} }
// Node returns the Node assocuated with this - satisfies Noder interface // Node returns the Node associated with this - satisfies Noder interface
func (fh *RWFileHandle) Node() Node { func (fh *RWFileHandle) Node() Node {
fh.mu.Lock() fh.mu.Lock()
defer fh.mu.Unlock() defer fh.mu.Unlock()
@ -108,7 +108,7 @@ type OsFiler interface {
WriteString(s string) (n int, err error) WriteString(s string) (n int, err error)
} }
// Handle is the interface statisified by open files or directories. // Handle is the interface satisfied by open files or directories.
// It is the methods on *os.File, plus a few more useful for FUSE // It is the methods on *os.File, plus a few more useful for FUSE
// filingsystems. Not all of them are supported. // filingsystems. Not all of them are supported.
type Handle interface { type Handle interface {
@ -29,7 +29,7 @@ var (
// Constants uses in the tests // Constants uses in the tests
const ( const (
writeBackDelay = 100 * time.Millisecond // A short writeback delay for testing writeBackDelay = 100 * time.Millisecond // A short writeback delay for testing
waitForWritersDelay = 10 * time.Second // time to wait for exiting writiers waitForWritersDelay = 10 * time.Second // time to wait for existing writers
) )
// TestMain drives the tests // TestMain drives the tests
@ -56,7 +56,7 @@ type Cache struct {
used int64 // total size of files in the cache used int64 // total size of files in the cache
outOfSpace bool // out of space outOfSpace bool // out of space
cleanerKicked bool // some thread kicked the cleaner upon out of space cleanerKicked bool // some thread kicked the cleaner upon out of space
kickerMu sync.Mutex // mutex for clearnerKicked kickerMu sync.Mutex // mutex for cleanerKicked
kick chan struct{} // channel for kicking clear to start kick chan struct{} // channel for kicking clear to start
} }
@ -69,7 +69,7 @@ type Cache struct {
// go into the directory tree. // go into the directory tree.
type AddVirtualFn func(remote string, size int64, isDir bool) error type AddVirtualFn func(remote string, size int64, isDir bool) error
// New creates a new cache heirachy for fremote // New creates a new cache hierarchy for fremote
// //
// This starts background goroutines which can be cancelled with the // This starts background goroutines which can be cancelled with the
// context passed in. // context passed in.
@ -336,7 +336,7 @@ func (c *Cache) Rename(name string, newName string, newObj fs.Object) (err error
// Remove should be called if name is deleted // Remove should be called if name is deleted
// //
// This returns true if the file was in the transfer queue so may not // This returns true if the file was in the transfer queue so may not
// have completedly uploaded yet. // have completely uploaded yet.
func (c *Cache) Remove(name string) (wasWriting bool) { func (c *Cache) Remove(name string) (wasWriting bool) {
name = clean(name) name = clean(name)
c.mu.Lock() c.mu.Lock()
@ -461,7 +461,7 @@ func (c *Cache) removeNotInUse(item *Item, maxAge time.Duration, emptyOnly bool)
// Retry failed resets during purgeClean() // Retry failed resets during purgeClean()
func (c *Cache) retryFailedResets() { func (c *Cache) retryFailedResets() {
// Some items may have failed to reset becasue there was not enough space // Some items may have failed to reset because there was not enough space
// for saving the cache item's metadata. Redo the Reset()'s here now that // for saving the cache item's metadata. Redo the Reset()'s here now that
// we may have some available space. // we may have some available space.
if len(c.errItems) != 0 { if len(c.errItems) != 0 {
@ -625,7 +625,7 @@ func (c *Cache) clean(removeCleanFiles bool) {
c.purgeOverQuota(int64(c.opt.CacheMaxSize)) c.purgeOverQuota(int64(c.opt.CacheMaxSize))
// removeCleanFiles indicates that we got ENOSPC error // removeCleanFiles indicates that we got ENOSPC error
// We remove cache files that are not dirty if we are still avove the max cache size // We remove cache files that are not dirty if we are still above the max cache size
if removeCleanFiles { if removeCleanFiles {
c.purgeClean(int64(c.opt.CacheMaxSize)) c.purgeClean(int64(c.opt.CacheMaxSize))
c.retryFailedResets() c.retryFailedResets()
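The cache comments above mention background goroutines that are cancelled via the context passed to New. The snippet below is a generic sketch of that pattern (a periodic worker stopped by context cancellation); the `startCleaner` name and interval are invented for the example and are not the cache's actual code.
```
package main

import (
	"context"
	"fmt"
	"time"
)

// startCleaner runs clean on a ticker until ctx is cancelled.
func startCleaner(ctx context.Context, interval time.Duration, clean func()) {
	go func() {
		ticker := time.NewTicker(interval)
		defer ticker.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
				clean()
			}
		}
	}()
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	startCleaner(ctx, 10*time.Millisecond, func() { fmt.Println("clean pass") })
	time.Sleep(35 * time.Millisecond)
	cancel() // stops the background goroutine
	time.Sleep(10 * time.Millisecond)
}
```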
@ -430,24 +430,24 @@ func TestCachePurgeOverQuota(t *testing.T) {
func TestCachePurgeClean(t *testing.T) { func TestCachePurgeClean(t *testing.T) {
r, c, cleanup := newItemTestCache(t) r, c, cleanup := newItemTestCache(t)
defer cleanup() defer cleanup()
contents, obj, patato1 := newFile(t, r, c, "existing") contents, obj, potato1 := newFile(t, r, c, "existing")
_ = contents _ = contents
// Open the object to create metadata for it // Open the object to create metadata for it
require.NoError(t, patato1.Open(obj)) require.NoError(t, potato1.Open(obj))
require.NoError(t, patato1.Open(obj)) require.NoError(t, potato1.Open(obj))
size, err := patato1.GetSize() size, err := potato1.GetSize()
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, int64(100), size) assert.Equal(t, int64(100), size)
// Read something to instantiate the cache file // Read something to instantiate the cache file
buf := make([]byte, 10) buf := make([]byte, 10)
_, err = patato1.ReadAt(buf, 10) _, err = potato1.ReadAt(buf, 10)
require.NoError(t, err) require.NoError(t, err)
// Test cache file present // Test cache file present
_, err = os.Stat(patato1.c.toOSPath(patato1.name)) _, err = os.Stat(potato1.c.toOSPath(potato1.name))
require.NoError(t, err) require.NoError(t, err)
// Add some potatos // Add some potatos
@ -476,8 +476,8 @@ func TestCachePurgeClean(t *testing.T) {
}, itemSpaceAsString(c)) }, itemSpaceAsString(c))
assert.Equal(t, int64(6), c.used) assert.Equal(t, int64(6), c.used)
require.NoError(t, patato1.Close(nil)) require.NoError(t, potato1.Close(nil))
require.NoError(t, patato1.Close(nil)) require.NoError(t, potato1.Close(nil))
require.NoError(t, potato3.Close(nil)) require.NoError(t, potato3.Close(nil))
// Remove all files now. The are all not in use. // Remove all files now. The are all not in use.
@ -298,7 +298,7 @@ func (dls *Downloaders) _ensureDownloader(r ranges.Range) (err error) {
r = dls.item.FindMissing(r) r = dls.item.FindMissing(r)
// If the range is entirely present then we only need to start a // If the range is entirely present then we only need to start a
// dowloader if the window isn't full. // downloader if the window isn't full.
startNew := true startNew := true
if r.IsEmpty() { if r.IsEmpty() {
// Make a new range which includes the window // Make a new range which includes the window
@ -561,7 +561,7 @@ func (dl *downloader) close(inErr error) (err error) {
return nil return nil
} }
// closed returns true if the downloader has been closed alread // closed returns true if the downloader has been closed already
func (dl *downloader) closed() bool { func (dl *downloader) closed() bool {
dl.mu.Lock() dl.mu.Lock()
defer dl.mu.Unlock() defer dl.mu.Unlock()
@ -246,7 +246,7 @@ func (item *Item) _truncate(size int64) (err error) {
// Use open handle if available // Use open handle if available
fd := item.fd fd := item.fd
if fd == nil { if fd == nil {
// If the metadata says we have some blockes cached then the // If the metadata says we have some blocks cached then the
// file should exist, so open without O_CREATE // file should exist, so open without O_CREATE
oFlags := os.O_WRONLY oFlags := os.O_WRONLY
if item.info.Rs.Size() == 0 { if item.info.Rs.Size() == 0 {
@ -647,7 +647,7 @@ func (item *Item) Close(storeFn StoreFn) (err error) {
// If the file is dirty ensure any segments not transferred // If the file is dirty ensure any segments not transferred
// are brought in first. // are brought in first.
// //
// FIXME It would be nice to do this asynchronously howeve it // FIXME It would be nice to do this asynchronously however it
// would require keeping the downloaders alive after the item // would require keeping the downloaders alive after the item
// has been closed // has been closed
if item.info.Dirty && item.o != nil { if item.info.Dirty && item.o != nil {
@ -841,7 +841,7 @@ func (item *Item) _removeMeta(reason string) {
// remove the cached file and empty the metadata // remove the cached file and empty the metadata
// //
// This returns true if the file was in the transfer queue so may not // This returns true if the file was in the transfer queue so may not
// have completedly uploaded yet. // have completely uploaded yet.
// //
// call with lock held // call with lock held
func (item *Item) _remove(reason string) (wasWriting bool) { func (item *Item) _remove(reason string) (wasWriting bool) {
@ -859,7 +859,7 @@ func (item *Item) _remove(reason string) (wasWriting bool) {
// remove the cached file and empty the metadata // remove the cached file and empty the metadata
// //
// This returns true if the file was in the transfer queue so may not // This returns true if the file was in the transfer queue so may not
// have completedly uploaded yet. // have completely uploaded yet.
func (item *Item) remove(reason string) (wasWriting bool) { func (item *Item) remove(reason string) (wasWriting bool) {
item.mu.Lock() item.mu.Lock()
defer item.mu.Unlock() defer item.mu.Unlock()
@ -548,7 +548,7 @@ func TestItemReadWrite(t *testing.T) {
assert.False(t, item.remove(fileName)) assert.False(t, item.remove(fileName))
}) })
// Read it back randomly concurently // Read it back randomly concurrently
t.Run("RandomConcurrent", func(t *testing.T) { t.Run("RandomConcurrent", func(t *testing.T) {
require.NoError(t, item.Open(obj)) require.NoError(t, item.Open(obj))
assert.False(t, item.present()) assert.False(t, item.present())
@ -15,7 +15,7 @@ import (
) )
const ( const (
maxUploadDelay = 5 * time.Minute // max delay betwen upload attempts maxUploadDelay = 5 * time.Minute // max delay between upload attempts
) )
// PutFn is the interface that item provides to store the data // PutFn is the interface that item provides to store the data
@ -32,7 +32,7 @@ type WriteBack struct {
lookup map[Handle]*writeBackItem // for getting a *writeBackItem from a Handle - writeBackItems are in here until cancelled lookup map[Handle]*writeBackItem // for getting a *writeBackItem from a Handle - writeBackItems are in here until cancelled
opt *vfscommon.Options // VFS options opt *vfscommon.Options // VFS options
timer *time.Timer // next scheduled time for the uploader timer *time.Timer // next scheduled time for the uploader
expiry time.Time // time the next item exires or IsZero expiry time.Time // time the next item expires or IsZero
uploads int // number of uploads in progress uploads int // number of uploads in progress
// read and written with atomic // read and written with atomic
@ -156,7 +156,7 @@ func TestDirModTime(t *testing.T) {
run.rmdir(t, "dir") run.rmdir(t, "dir")
} }
// TestDirCacheFlush tests fluching the dir cache // TestDirCacheFlush tests flushing the dir cache
func TestDirCacheFlush(t *testing.T) { func TestDirCacheFlush(t *testing.T) {
run.skipIfNoFUSE(t) run.skipIfNoFUSE(t)
@ -97,7 +97,7 @@ func (fh *WriteFileHandle) String() string {
return fh.file.String() + " (w)" return fh.file.String() + " (w)"
} }
// Node returns the Node assocuated with this - satisfies Noder interface // Node returns the Node associated with this - satisfies Noder interface
func (fh *WriteFileHandle) Node() Node { func (fh *WriteFileHandle) Node() Node {
fh.mu.Lock() fh.mu.Lock()
defer fh.mu.Unlock() defer fh.mu.Unlock()
@ -122,7 +122,7 @@ func (fh *WriteFileHandle) WriteAt(p []byte, off int64) (n int, err error) {
return fh.writeAt(p, off) return fh.writeAt(p, off)
} }
// Implementatino of WriteAt - call with lock held // Implementation of WriteAt - call with lock held
func (fh *WriteFileHandle) writeAt(p []byte, off int64) (n int, err error) { func (fh *WriteFileHandle) writeAt(p []byte, off int64) (n int, err error) {
// defer log.Trace(fh.remote, "len=%d off=%d", len(p), off)("n=%d, fh.off=%d, err=%v", &n, &fh.offset, &err) // defer log.Trace(fh.remote, "len=%d off=%d", len(p), off)("n=%d, fh.off=%d, err=%v", &n, &fh.offset, &err)
if fh.closed { if fh.closed {