diff --git a/README.md b/README.md
index 1e5b30cff..23496ca1c 100644
--- a/README.md
+++ b/README.md
@@ -29,6 +29,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
   * Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
   * Box [:page_facing_up:](https://rclone.org/box/)
   * Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
+  * Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
   * DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
   * Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
   * Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
diff --git a/backend/all/all.go b/backend/all/all.go
index f3ec8726c..9c81bc1af 100644
--- a/backend/all/all.go
+++ b/backend/all/all.go
@@ -31,6 +31,7 @@ import (
 	_ "github.com/rclone/rclone/backend/qingstor"
 	_ "github.com/rclone/rclone/backend/s3"
 	_ "github.com/rclone/rclone/backend/sftp"
+	_ "github.com/rclone/rclone/backend/sharefile"
 	_ "github.com/rclone/rclone/backend/swift"
 	_ "github.com/rclone/rclone/backend/union"
 	_ "github.com/rclone/rclone/backend/webdav"
diff --git a/backend/sharefile/api/types.go b/backend/sharefile/api/types.go
new file mode 100644
index 000000000..8d96228c3
--- /dev/null
+++ b/backend/sharefile/api/types.go
@@ -0,0 +1,152 @@
+// Package api contains definitions for using the Citrix ShareFile API
+package api
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/pkg/errors"
+)
+
+// ListRequestSelect should be used in $select for Items/Children
+const ListRequestSelect = "odata.count,FileCount,Name,FileName,CreationDate,IsHidden,FileSizeBytes,odata.type,Id,Hash,ClientModifiedDate"
+
+// ListResponse is returned from the Items/Children call
+type ListResponse struct {
+	OdataCount int    `json:"odata.count"`
+	Value      []Item `json:"value"`
+}
+
+// Item Types
+const (
+	ItemTypeFolder = "ShareFile.Api.Models.Folder"
+	ItemTypeFile   = "ShareFile.Api.Models.File"
+)
+
+// Item refers to a file or folder
+type Item struct {
+	FileCount  int32     `json:"FileCount,omitempty"`
+	Name       string    `json:"Name,omitempty"`
+	FileName   string    `json:"FileName,omitempty"`
+	CreatedAt  time.Time `json:"CreationDate,omitempty"`
+	ModifiedAt time.Time `json:"ClientModifiedDate,omitempty"`
+	IsHidden   bool      `json:"IsHidden,omitempty"`
+	Size       int64     `json:"FileSizeBytes,omitempty"`
+	Type       string    `json:"odata.type,omitempty"`
+	ID         string    `json:"Id,omitempty"`
+	Hash       string    `json:"Hash,omitempty"`
+}
+
+// Error is an odata error return
+type Error struct {
+	Code    string `json:"code"`
+	Message struct {
+		Lang  string `json:"lang"`
+		Value string `json:"value"`
+	} `json:"message"`
+	Reason string `json:"reason"`
+}
+
+// Satisfy error interface
+func (e *Error) Error() string {
+	return fmt.Sprintf("%s: %s: %s", e.Message.Value, e.Code, e.Reason)
+}
+
+// Check Error satisfies error interface
+var _ error = &Error{}
+
+// DownloadSpecification is the response to /Items/Download
+type DownloadSpecification struct {
+	Token    string `json:"DownloadToken"`
+	URL      string `json:"DownloadUrl"`
+	Metadata string `json:"odata.metadata"`
+	Type     string `json:"odata.type"`
+}
+
+// UploadRequest is set to /Items/Upload2 to receive an UploadSpecification
+type UploadRequest struct {
+	Method   string `json:"method"`             // Upload method: one of: standard, streamed or threaded
+	Raw      bool   `json:"raw"`                // Raw post if true or MIME upload if false
+	Filename string `json:"fileName"`           // Uploaded item file name.
+	Filesize *int64 `json:"fileSize,omitempty"` // Uploaded item file size.
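
[Editor's aside - an illustrative sketch, not part of this change: ListRequestSelect above is passed as the OData $select parameter so the server returns only the fields the Item struct decodes. Using the rest.Opts helper that the backend code later in this diff uses, a children listing request looks roughly like:

	opts := rest.Opts{
		Method:     "GET",
		Path:       "/Items(" + dirID + ")/Children",
		Parameters: url.Values{"$select": {api.ListRequestSelect}},
	}
	// f.srv.CallJSON(ctx, &opts, nil, &result) then decodes into api.ListResponse

dirID here is a placeholder for a folder's Id.]
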
+	Overwrite      bool      `json:"overwrite"`          // Indicates whether items with the same name will be overwritten or not.
+	CreatedDate    time.Time `json:"ClientCreatedDate"`  // Created Date of this Item.
+	ModifiedDate   time.Time `json:"ClientModifiedDate"` // Modified Date of this Item.
+	BatchID        string    `json:"batchId,omitempty"`        // Indicates part of a batch. Batched uploads do not send notification until the whole batch is completed.
+	BatchLast      *bool     `json:"batchLast,omitempty"`      // Indicates this is the last in a batch. Upload notifications for the whole batch are sent after this upload.
+	CanResume      *bool     `json:"canResume,omitempty"`      // Indicates uploader supports resume.
+	StartOver      *bool     `json:"startOver,omitempty"`      // Indicates uploader wants to restart the file - i.e., ignore previous failed upload attempts.
+	Tool           string    `json:"tool,omitempty"`           // Identifies the uploader tool.
+	Title          string    `json:"title,omitempty"`          // Item Title
+	Details        string    `json:"details,omitempty"`        // Item description
+	IsSend         *bool     `json:"isSend,omitempty"`         // Indicates that this upload is part of a Send operation
+	SendGUID       string    `json:"sendGuid,omitempty"`       // Used if IsSend is true. Specifies which Send operation this upload is part of.
+	OpID           string    `json:"opid,omitempty"`           // Used for Asynchronous copy/move operations - called by Zones to push files to other Zones
+	ThreadCount    *int      `json:"threadCount,omitempty"`    // Specifies the number of threads the threaded uploader will use. Only used if method is threaded, ignored otherwise
+	Notify         *bool     `json:"notify,omitempty"`         // Indicates whether users will be notified of this upload - based on folder preferences
+	ExpirationDays *int      `json:"expirationDays,omitempty"` // File expiration days
+	BaseFileID     string    `json:"baseFileId,omitempty"`     // Used to check conflict in file during File Upload.
+}
+
+// UploadSpecification is returned from /Items/Upload
+type UploadSpecification struct {
+	Method       string `json:"Method"`       // The Upload method that must be used for this upload
+	PrepareURI   string `json:"PrepareUri"`   // If provided, clients must issue a request to this Uri before uploading any data.
+	ChunkURI     string `json:"ChunkUri"`     // Specifies the URI the client must send the file data to
+	FinishURI    string `json:"FinishUri"`    // If provided, specifies the final call the client must perform to finish the upload process
+	ProgressData string `json:"ProgressData"` // Allows the client to check progress of standard uploads
+	IsResume     bool   `json:"IsResume"`     // Specifies a Resumable upload is supported.
+	ResumeIndex  int64  `json:"ResumeIndex"`  // Specifies the initial index for resuming, if IsResume is true.
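
[Editor's aside - a hedged sketch of how a client could use the Resume fields in this struct (whether the uploader in this change actually resumes is not shown in this diff): before continuing from ResumeIndex, hash the first ResumeOffset bytes of the local file and compare with ResumeFileHash; on mismatch, restart with UploadRequest.StartOver.

	h := md5.New()
	if _, err := io.CopyN(h, localFile, spec.ResumeOffset); err != nil {
		return err
	}
	if fmt.Sprintf("%x", h.Sum(nil)) != spec.ResumeFileHash {
		// server's partial data doesn't match ours - start the upload over
	}

localFile and spec are placeholders for an *os.File and an UploadSpecification.]
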
+	ResumeOffset       int64  `json:"ResumeOffset"`       // Specifies the initial file offset by bytes, if IsResume is true
+	ResumeFileHash     string `json:"ResumeFileHash"`     // Specifies the MD5 hash of the first ResumeOffset bytes of the partial file found at the server
+	MaxNumberOfThreads int    `json:"MaxNumberOfThreads"` // Specifies the max number of chunks that can be sent simultaneously for threaded uploads
+}
+
+// UploadFinishResponse is returned from calling UploadSpecification.FinishURI
+type UploadFinishResponse struct {
+	Error        bool   `json:"error"`
+	ErrorMessage string `json:"errorMessage"`
+	ErrorCode    int    `json:"errorCode"`
+	Value        []struct {
+		UploadID    string `json:"uploadid"`
+		ParentID    string `json:"parentid"`
+		ID          string `json:"id"`
+		StreamID    string `json:"streamid"`
+		FileName    string `json:"filename"`
+		DisplayName string `json:"displayname"`
+		Size        int    `json:"size"`
+		Md5         string `json:"md5"`
+	} `json:"value"`
+}
+
+// ID returns the ID of the first response if available
+func (finish *UploadFinishResponse) ID() (string, error) {
+	if finish.Error {
+		return "", errors.Errorf("upload failed: %s (%d)", finish.ErrorMessage, finish.ErrorCode)
+	}
+	if len(finish.Value) == 0 {
+		return "", errors.New("upload failed: no results returned")
+	}
+	return finish.Value[0].ID, nil
+}
+
+// Parent is the ID of the parent folder
+type Parent struct {
+	ID string `json:"Id,omitempty"`
+}
+
+// Zone is where the data is stored
+type Zone struct {
+	ID string `json:"Id,omitempty"`
+}
+
+// UpdateItemRequest is sent to PATCH /v3/Items(id)
+type UpdateItemRequest struct {
+	Name           string     `json:"Name,omitempty"`
+	FileName       string     `json:"FileName,omitempty"`
+	Description    string     `json:"Description,omitempty"`
+	ExpirationDate *time.Time `json:"ExpirationDate,omitempty"`
+	Parent         *Parent    `json:"Parent,omitempty"`
+	Zone           *Zone      `json:"Zone,omitempty"`
+	ModifiedAt     *time.Time `json:"ClientModifiedDate,omitempty"`
+}
diff --git a/backend/sharefile/generate_tzdata.go b/backend/sharefile/generate_tzdata.go
new file mode 100644
index 000000000..4f86911c5
--- /dev/null
+++ b/backend/sharefile/generate_tzdata.go
@@ -0,0 +1,22 @@
+// +build ignore
+
+package main
+
+import (
+	"log"
+	"net/http"
+
+	"github.com/shurcooL/vfsgen"
+)
+
+func main() {
+	var AssetDir http.FileSystem = http.Dir("./tzdata")
+	err := vfsgen.Generate(AssetDir, vfsgen.Options{
+		PackageName:  "sharefile",
+		BuildTags:    "!dev",
+		VariableName: "tzdata",
+	})
+	if err != nil {
+		log.Fatalln(err)
+	}
+}
diff --git a/backend/sharefile/replace.go b/backend/sharefile/replace.go
new file mode 100644
index 000000000..3f8f4b8eb
--- /dev/null
+++ b/backend/sharefile/replace.go
@@ -0,0 +1,76 @@
+/*
+Translate file names for sharefile
+*/
+
+package sharefile
+
+import (
+	"regexp"
+	"strings"
+)
+
+// charMap holds replacements for characters
+//
+// Sharefile has a restricted set of characters compared to other
+// cloud storage systems, so we map these to the FULLWIDTH unicode
+// equivalents
+//
+// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
+var (
+	charMap = map[rune]rune{
+		'\\': '＼', // FULLWIDTH REVERSE SOLIDUS
+		'*':  '＊', // FULLWIDTH ASTERISK
+		'<':  '＜', // FULLWIDTH LESS-THAN SIGN
+		'>':  '＞', // FULLWIDTH GREATER-THAN SIGN
+		'?':  '？', // FULLWIDTH QUESTION MARK
+		':':  '：', // FULLWIDTH COLON
+		'|':  '｜', // FULLWIDTH VERTICAL LINE
+		'"':  '＂', // FULLWIDTH QUOTATION MARK
+		'.':  '．', // FULLWIDTH FULL STOP
+		' ':  '␠', // SYMBOL FOR SPACE
+	}
+	invCharMap            map[rune]rune
+	fixStartingWithPeriod = regexp.MustCompile(`(/|^)\.`)
+	fixEndingWithPeriod   = regexp.MustCompile(`\.(/|$)`)
+	fixStartingWithSpace  = regexp.MustCompile(`(/|^) `)
+	fixEndingWithSpace    = regexp.MustCompile(` (/|$)`)
+)
+
+func init() {
+	// Create inverse charMap
+	invCharMap = make(map[rune]rune, len(charMap))
+	for k, v := range charMap {
+		invCharMap[v] = k
+	}
+}
+
+// replaceReservedChars takes a path and substitutes any reserved
+// characters in it
+func replaceReservedChars(in string) string {
+	// Names can't start with a period '.'
+	in = fixStartingWithPeriod.ReplaceAllString(in, "$1"+string(charMap['.']))
+	// Names can't end with a period '.'
+	in = fixEndingWithPeriod.ReplaceAllString(in, string(charMap['.'])+"$1")
+	// Names can't start with space
+	in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
+	// Names can't end with space
+	in = fixEndingWithSpace.ReplaceAllString(in, string(charMap[' '])+"$1")
+	// Replace reserved characters
+	return strings.Map(func(c rune) rune {
+		if replacement, ok := charMap[c]; ok && c != '.' && c != '~' && c != ' ' {
+			return replacement
+		}
+		return c
+	}, in)
+}
+
+// restoreReservedChars takes a path and undoes any substitutions
+// made by replaceReservedChars
+func restoreReservedChars(in string) string {
+	return strings.Map(func(c rune) rune {
+		if replacement, ok := invCharMap[c]; ok {
+			return replacement
+		}
+		return c
+	}, in)
+}
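
[Editor's aside - a worked example of the name mapping above, before the tests that follow (the fullwidth forms come from charMap and round-trip exactly):

	replaceReservedChars(`file:name?.txt`) // -> `file：name？.txt` (':' and '?' swapped, '.' mid-name kept)
	replaceReservedChars(`.profile`)       // -> `．profile` (leading period not allowed)
	restoreReservedChars(`．profile`)      // -> `.profile`
]
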
diff --git a/backend/sharefile/replace_test.go b/backend/sharefile/replace_test.go
new file mode 100644
index 000000000..9450d69a9
--- /dev/null
+++ b/backend/sharefile/replace_test.go
@@ -0,0 +1,31 @@
+package sharefile
+
+import "testing"
+
+func TestReplace(t *testing.T) {
+	for _, test := range []struct {
+		in  string
+		out string
+	}{
+		{"", ""},
+		{"abc 123", "abc 123"},
+		{`\*<>?:|#%".~`, `＼＊＜＞？：｜#%＂.~`},
+		{`\*<>?:|#%".~/\*<>?:|#%".~`, `＼＊＜＞？：｜#%＂.~/＼＊＜＞？：｜#%＂.~`},
+		{" leading space", "␠leading space"},
+		{"trailing space ", "trailing space␠"},
+		{".leading dot", "．leading dot"},
+		{"trailing dot.", "trailing dot．"},
+		{" leading space/ leading space/ leading space", "␠leading space/␠leading space/␠leading space"},
+		{"trailing dot./trailing dot./trailing dot.", "trailing dot．/trailing dot．/trailing dot．"},
+		{".leading dot/..leading dot/.leading dot", "．leading dot/．.leading dot/．leading dot"},
+	} {
+		got := replaceReservedChars(test.in)
+		if got != test.out {
+			t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
+		}
+		got2 := restoreReservedChars(got)
+		if got2 != test.in {
+			t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
+		}
+	}
+}
diff --git a/backend/sharefile/sharefile.go b/backend/sharefile/sharefile.go
new file mode 100644
index 000000000..0b179eb00
--- /dev/null
+++ b/backend/sharefile/sharefile.go
@@ -0,0 +1,1515 @@
+// Package sharefile provides an interface to the Citrix Sharefile
+// object storage system.
+package sharefile
+
+//go:generate ./update-timezone.sh
+
+/* NOTES
+
+## for docs
+
+Detail standard/chunked/streaming uploads?
+
+## Bugs in API
+
+The times in updateItem are being parsed in EST/DST local time
+updateItem only sets times accurate to 1 second
+
+https://community.sharefilesupport.com/citrixsharefile/topics/bug-report-for-update-item-patch-items-id-setting-clientmodifieddate-ignores-timezone-and-milliseconds
+
+When doing a rename+move directory, the server appears to do the
+rename first in the source directory, which can overwrite files of
+the same name in that directory.
+ +https://community.sharefilesupport.com/citrixsharefile/topics/bug-report-for-update-item-patch-items-id-file-overwrite-under-certain-conditions + +The Copy command can't change the name at the same time which means we +have to copy via a temporary directory. + +https://community.sharefilesupport.com/citrixsharefile/topics/copy-item-needs-to-be-able-to-set-a-new-name + +## Allowed characters + +https://api.sharefile.com/rest/index/odata.aspx + +$select to limit returned fields +https://www.odata.org/documentation/odata-version-3-0/odata-version-3-0-core-protocol/#theselectsystemqueryoption + +Also $filter to select only things we need + +https://support.citrix.com/article/CTX234774 + +The following characters should not be used in folder or file names. + +\ +/ +. +, +: +; +* +? +" +< +> +A filename ending with a period without an extension +File names with leading or trailing whitespaces. + + +// sharefile +stringNeedsEscaping = []byte{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20, 0x2A, 0x2E, 0x2F, 0x3A, 0x3C, 0x3E, 0x3F, 0x7C, 0xEFBCBC +} +maxFileLength = 256 +canWriteUnnormalized = true +canReadUnnormalized = true +canReadRenormalized = false +canStream = true + +Which is control chars + [' ', '*', '.', '/', ':', '<', '>', '?', '|'] +- also \ and " + +*/ + +import ( + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "net/url" + "path" + "strings" + "time" + + "github.com/pkg/errors" + "github.com/rclone/rclone/backend/sharefile/api" + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/config/configmap" + "github.com/rclone/rclone/fs/config/configstruct" + "github.com/rclone/rclone/fs/config/obscure" + "github.com/rclone/rclone/fs/fserrors" + "github.com/rclone/rclone/fs/hash" + "github.com/rclone/rclone/lib/dircache" + "github.com/rclone/rclone/lib/oauthutil" + "github.com/rclone/rclone/lib/pacer" + "github.com/rclone/rclone/lib/random" + "github.com/rclone/rclone/lib/rest" + "golang.org/x/oauth2" +) + +const ( + rcloneClientID = "djQUPlHTUM9EvayYBWuKC5IrVIoQde46" + rcloneEncryptedClientSecret = "v7572bKhUindQL3yDnUAebmgP-QxiwT38JLxVPolcZBl6SSs329MtFzH73x7BeELmMVZtneUPvALSopUZ6VkhQ" + minSleep = 10 * time.Millisecond + maxSleep = 2 * time.Second + decayConstant = 2 // bigger for slower decay, exponential + apiPath = "/sf/v3" // add to endpoint to get API path + tokenPath = "/oauth/token" // add to endpoint to get Token path + minChunkSize = 256 * fs.KibiByte + maxChunkSize = 2 * fs.GibiByte + defaultChunkSize = 64 * fs.MebiByte + defaultUploadCutoff = 128 * fs.MebiByte +) + +// Generate a new oauth2 config which we will update when we know the TokenURL +func newOauthConfig(tokenURL string) *oauth2.Config { + return &oauth2.Config{ + Scopes: nil, + Endpoint: oauth2.Endpoint{ + AuthURL: "https://secure.sharefile.com/oauth/authorize", + TokenURL: tokenURL, + }, + ClientID: rcloneClientID, + ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret), + RedirectURL: oauthutil.RedirectPublicSecureURL, + } +} + +// Register with Fs +func init() { + fs.Register(&fs.RegInfo{ + Name: "sharefile", + Description: "Citrix Sharefile", + NewFs: NewFs, + Config: func(name string, m configmap.Mapper) { + oauthConfig := newOauthConfig("") + checkAuth := func(oauthConfig *oauth2.Config, auth *oauthutil.AuthResult) error { + if auth == nil || auth.Form == nil { + return errors.New("endpoint not found in 
response")
+			}
+			subdomain := auth.Form.Get("subdomain")
+			apicp := auth.Form.Get("apicp")
+			if subdomain == "" || apicp == "" {
+				return errors.Errorf("subdomain or apicp not found in response: %+v", auth.Form)
+			}
+			endpoint := "https://" + subdomain + "." + apicp
+			m.Set("endpoint", endpoint)
+			oauthConfig.Endpoint.TokenURL = endpoint + tokenPath
+			return nil
+		}
+		err := oauthutil.ConfigWithCallback("sharefile", name, m, oauthConfig, checkAuth)
+		if err != nil {
+			log.Fatalf("Failed to configure token: %v", err)
+		}
+	},
+	Options: []fs.Option{{
+		Name:     "upload_cutoff",
+		Help:     "Cutoff for switching to multipart upload.",
+		Default:  defaultUploadCutoff,
+		Advanced: true,
+	}, {
+		Name: "root_folder_id",
+		Help: `ID of the root folder
+
+Leave blank to access "Personal Folders". You can use one of the
+standard values here or any folder ID (long hex number ID).`,
+		Examples: []fs.OptionExample{{
+			Value: "",
+			Help:  `Access the Personal Folders. (Default)`,
+		}, {
+			Value: "favorites",
+			Help:  "Access the Favorites folder.",
+		}, {
+			Value: "allshared",
+			Help:  "Access all the shared folders.",
+		}, {
+			Value: "connectors",
+			Help:  "Access all the individual connectors.",
+		}, {
+			Value: "top",
+			Help:  "Access the home, favorites, and shared folders as well as the connectors.",
+		}},
+	}, {
+		Name:    "chunk_size",
+		Default: defaultChunkSize,
+		Help: `Upload chunk size. Must be a power of 2 >= 256k.
+
+Making this larger will improve performance, but note that each chunk
+is buffered in memory, one per transfer.
+
+Reducing this will reduce memory usage but decrease performance.`,
+		Advanced: true,
+	}, {
+		Name: "endpoint",
+		Help: `Endpoint for API calls.
+
+This is usually auto discovered as part of the oauth process, but can
+be set manually to something like: https://XXX.sharefile.com
+`,
+		Advanced: true,
+		Default:  "",
+	}},
+	})
+}
+
+// Options defines the configuration for this backend
+type Options struct {
+	RootFolderID string        `config:"root_folder_id"`
+	UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
+	ChunkSize    fs.SizeSuffix `config:"chunk_size"`
+	Endpoint     string        `config:"endpoint"`
+}
+
+// Fs represents a remote cloud storage system
+type Fs struct {
+	name         string             // name of this remote
+	root         string             // the path we are working on
+	opt          Options            // parsed options
+	features     *fs.Features       // optional features
+	srv          *rest.Client       // the connection to the server
+	dirCache     *dircache.DirCache // Map of directory path to directory id
+	pacer        *fs.Pacer          // pacer for API calls
+	bufferTokens chan []byte        // control concurrency of multipart uploads
+	tokenRenewer *oauthutil.Renew   // renew the token on expiry
+	rootID       string             // ID of the user's root folder
+	location     *time.Location     // timezone of server for SetModTime workaround
+}
+
+// Object describes a file
+type Object struct {
+	fs          *Fs       // what this object is part of
+	remote      string    // The remote path
+	hasMetaData bool      // metadata is present and correct
+	size        int64     // size of the object
+	modTime     time.Time // modification time of the object
+	id          string    // ID of the object
+	md5         string    // hash of the object
+}
+
+// ------------------------------------------------------------
+
+// Name of the remote (as passed into NewFs)
+func (f *Fs) Name() string {
+	return f.name
+}
+
+// Root of the remote (as passed into NewFs)
+func (f *Fs) Root() string {
+	return f.root
+}
+
+// String converts this Fs to a string
+func (f *Fs) String() string {
+	return fmt.Sprintf("sharefile root '%s'", f.root)
+}
+
+// Features returns the optional features of this Fs
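
[Editor's aside on the Config/checkAuth callback above - a worked example of the endpoint discovery, with illustrative values: if the oauth redirect form carries subdomain=myaccount and apicp=sf-api.com, then endpoint becomes https://myaccount.sf-api.com, API calls go to https://myaccount.sf-api.com/sf/v3 (apiPath) and token refresh to https://myaccount.sf-api.com/oauth/token (tokenPath).]
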
+func (f *Fs) Features() *fs.Features {
+	return f.features
+}
+
+// parsePath parses a sharefile 'url'
+func parsePath(path string) (root string) {
+	root = strings.Trim(path, "/")
+	return
+}
+
+// retryErrorCodes is a slice of error codes that we will retry
+var retryErrorCodes = []int{
+	429, // Too Many Requests.
+	500, // Internal Server Error
+	502, // Bad Gateway
+	503, // Service Unavailable
+	504, // Gateway Timeout
+	509, // Bandwidth Limit Exceeded
+}
+
+// shouldRetry returns a boolean as to whether this resp and err
+// deserve to be retried. It returns the err as a convenience
+func shouldRetry(resp *http.Response, err error) (bool, error) {
+	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
+}
+
+// Reads the metadata for the id passed in. If id is "" then it returns the root.
+// If path is not "" then the item read uses id as the root and the path is relative to it.
+func (f *Fs) readMetaDataForIDPath(ctx context.Context, id, path string, directoriesOnly bool, filesOnly bool) (info *api.Item, err error) {
+	opts := rest.Opts{
+		Method: "GET",
+		Path:   "/Items",
+		Parameters: url.Values{
+			"$select": {api.ListRequestSelect},
+		},
+	}
+	if id != "" {
+		opts.Path += "(" + id + ")"
+	}
+	if path != "" {
+		opts.Path += "/ByPath"
+		opts.Parameters.Set("path", "/"+replaceReservedChars(path))
+	}
+	var item api.Item
+	var resp *http.Response
+	err = f.pacer.Call(func() (bool, error) {
+		resp, err = f.srv.CallJSON(ctx, &opts, nil, &item)
+		return shouldRetry(resp, err)
+	})
+	if err != nil {
+		if resp != nil && resp.StatusCode == http.StatusNotFound {
+			if filesOnly {
+				return nil, fs.ErrorObjectNotFound
+			}
+			return nil, fs.ErrorDirNotFound
+		}
+		return nil, errors.Wrap(err, "couldn't find item")
+	}
+	if directoriesOnly && item.Type != api.ItemTypeFolder {
+		return nil, fs.ErrorIsFile
+	}
+	if filesOnly && item.Type != api.ItemTypeFile {
+		return nil, fs.ErrorNotAFile
+	}
+	return &item, nil
+}
+
+// Reads the metadata for the id passed in. 
If id is "" then it returns the root +func (f *Fs) readMetaDataForID(ctx context.Context, id string, directoriesOnly bool, filesOnly bool) (info *api.Item, err error) { + return f.readMetaDataForIDPath(ctx, id, "", directoriesOnly, filesOnly) +} + +// readMetaDataForPath reads the metadata from the path +func (f *Fs) readMetaDataForPath(ctx context.Context, path string, directoriesOnly bool, filesOnly bool) (info *api.Item, err error) { + leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, path, false) + if err != nil { + if err == fs.ErrorDirNotFound { + return nil, fs.ErrorObjectNotFound + } + return nil, err + } + return f.readMetaDataForIDPath(ctx, directoryID, leaf, directoriesOnly, filesOnly) +} + +// errorHandler parses a non 2xx error response into an error +func errorHandler(resp *http.Response) error { + body, err := rest.ReadBody(resp) + if err != nil { + body = nil + } + var e = api.Error{ + Code: fmt.Sprint(resp.StatusCode), + Reason: resp.Status, + } + e.Message.Lang = "en" + e.Message.Value = string(body) + if body != nil { + _ = json.Unmarshal(body, &e) + } + return &e +} + +func checkUploadChunkSize(cs fs.SizeSuffix) error { + if cs < minChunkSize { + return errors.Errorf("ChunkSize: %s is less than %s", cs, minChunkSize) + } + if cs > maxChunkSize { + return errors.Errorf("ChunkSize: %s is greater than %s", cs, maxChunkSize) + } + return nil +} + +func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) { + err = checkUploadChunkSize(cs) + if err == nil { + old, f.opt.ChunkSize = f.opt.ChunkSize, cs + f.fillBufferTokens() // reset the buffer tokens + } + return +} + +func checkUploadCutoff(cs fs.SizeSuffix) error { + return nil +} + +func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) { + err = checkUploadCutoff(cs) + if err == nil { + old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs + } + return +} + +// NewFs constructs an Fs from the path, container:path +func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { + ctx := context.Background() + // Parse config into Options struct + opt := new(Options) + err := configstruct.Set(m, opt) + if err != nil { + return nil, err + } + + // Check parameters OK + if opt.Endpoint == "" { + return nil, errors.New("endpoint not set: rebuild the remote or set manually") + } + err = checkUploadChunkSize(opt.ChunkSize) + if err != nil { + return nil, err + } + err = checkUploadCutoff(opt.UploadCutoff) + if err != nil { + return nil, err + } + + root = parsePath(root) + + oauthConfig := newOauthConfig(opt.Endpoint + tokenPath) + var client *http.Client + var ts *oauthutil.TokenSource + client, ts, err = oauthutil.NewClient(name, m, oauthConfig) + if err != nil { + return nil, errors.Wrap(err, "failed to configure sharefile") + } + + f := &Fs{ + name: name, + root: root, + opt: *opt, + srv: rest.NewClient(client).SetRoot(opt.Endpoint + apiPath), + pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), + } + f.features = (&fs.Features{ + CaseInsensitive: true, + CanHaveEmptyDirectories: true, + ReadMimeType: false, + }).Fill(f) + f.srv.SetErrorHandler(errorHandler) + f.fillBufferTokens() + + // Renew the token in the background + if ts != nil { + f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error { + _, err := f.List(ctx, "") + return err + }) + } + + // Load the server timezone from an internal file + // Used to correct the time in SetModTime + const serverTimezone = "America/New_York" + 
timezone, err := tzdata.Open(serverTimezone) + if err != nil { + return nil, errors.Wrap(err, "failed to open timezone db") + } + tzdata, err := ioutil.ReadAll(timezone) + if err != nil { + return nil, errors.Wrap(err, "failed to read timezone") + } + _ = timezone.Close() + f.location, err = time.LoadLocationFromTZData(serverTimezone, tzdata) + if err != nil { + return nil, errors.Wrap(err, "failed to load location from timezone") + } + + // Find ID of user's root folder + if opt.RootFolderID == "" { + item, err := f.readMetaDataForID(ctx, opt.RootFolderID, true, false) + if err != nil { + return nil, errors.Wrap(err, "couldn't find root ID") + } + f.rootID = item.ID + } else { + f.rootID = opt.RootFolderID + } + + // Get rootID + f.dirCache = dircache.New(root, f.rootID, f) + + // Find the current root + err = f.dirCache.FindRoot(ctx, false) + if err != nil { + // Assume it is a file + newRoot, remote := dircache.SplitPath(root) + tempF := *f + tempF.dirCache = dircache.New(newRoot, f.rootID, &tempF) + tempF.root = newRoot + // Make new Fs which is the parent + err = tempF.dirCache.FindRoot(ctx, false) + if err != nil { + // No root so return old f + return f, nil + } + _, err := tempF.newObjectWithInfo(ctx, remote, nil) + if err != nil { + if err == fs.ErrorObjectNotFound { + // File doesn't exist so return old f + return f, nil + } + return nil, err + } + f.features.Fill(&tempF) + // XXX: update the old f here instead of returning tempF, since + // `features` were already filled with functions having *f as a receiver. + // See https://github.com/rclone/rclone/issues/2182 + f.dirCache = tempF.dirCache + f.root = tempF.root + // return an error with an fs which points to the parent + return f, fs.ErrorIsFile + } + return f, nil +} + +// Fill up (or reset) the buffer tokens +func (f *Fs) fillBufferTokens() { + f.bufferTokens = make(chan []byte, fs.Config.Transfers) + for i := 0; i < fs.Config.Transfers; i++ { + f.bufferTokens <- nil + } +} + +// getUploadBlock gets a block from the pool of size chunkSize +func (f *Fs) getUploadBlock() []byte { + buf := <-f.bufferTokens + if buf == nil { + buf = make([]byte, f.opt.ChunkSize) + } + // fs.Debugf(f, "Getting upload block %p", buf) + return buf +} + +// putUploadBlock returns a block to the pool of size chunkSize +func (f *Fs) putUploadBlock(buf []byte) { + buf = buf[:cap(buf)] + if len(buf) != int(f.opt.ChunkSize) { + panic("bad blocksize returned to pool") + } + // fs.Debugf(f, "Returning upload block %p", buf) + f.bufferTokens <- buf +} + +// Return an Object from a path +// +// If it can't be found it returns the error fs.ErrorObjectNotFound. +func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Item) (fs.Object, error) { + o := &Object{ + fs: f, + remote: remote, + } + var err error + if info != nil { + // Set info + err = o.setMetaData(info) + } else { + err = o.readMetaData(ctx) // reads info and meta, returning an error + } + if err != nil { + return nil, err + } + return o, nil +} + +// NewObject finds the Object at remote. If it can't be found +// it returns the error fs.ErrorObjectNotFound. 
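
[Editor's aside - a sketch of how the buffer pool above is meant to be used by the chunked uploader (the uploader itself, newLargeUpload, is not shown in this diff). At most fs.Config.Transfers buffers of chunk_size bytes exist at once, so memory use is bounded:

	buf := f.getUploadBlock()      // blocks until one of the Transfers buffers is free
	defer f.putUploadBlock(buf)    // hand it back for reuse by other chunks
	n, err := io.ReadFull(in, buf) // fill one chunk from the source reader
]
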
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { + return f.newObjectWithInfo(ctx, remote, nil) +} + +// FindLeaf finds a directory of name leaf in the folder with ID pathID +func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) { + if pathID == "top" { + // Find the leaf in pathID + found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool { + if item.Name == leaf { + pathIDOut = item.ID + return true + } + return false + }) + return pathIDOut, found, err + } + info, err := f.readMetaDataForIDPath(ctx, pathID, leaf, true, false) + if err == nil { + found = true + pathIDOut = info.ID + } else if err == fs.ErrorDirNotFound { + err = nil // don't return an error if not found + } + return pathIDOut, found, err +} + +// CreateDir makes a directory with pathID as parent and name leaf +func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) { + var resp *http.Response + leaf = replaceReservedChars(leaf) + var req = api.Item{ + Name: leaf, + FileName: leaf, + CreatedAt: time.Now(), + } + var info api.Item + opts := rest.Opts{ + Method: "POST", + Path: "/Items(" + pathID + ")/Folder", + Parameters: url.Values{ + "$select": {api.ListRequestSelect}, + "overwrite": {"false"}, + "passthrough": {"false"}, + }, + } + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(ctx, &opts, &req, &info) + return shouldRetry(resp, err) + }) + if err != nil { + return "", errors.Wrap(err, "CreateDir") + } + return info.ID, nil +} + +// list the objects into the function supplied +// +// If directories is set it only sends directories +// User function to process a File item from listAll +// +// Should return true to finish processing +type listAllFn func(*api.Item) bool + +// Lists the directory required calling the user function on each item found +// +// If the user fn ever returns true then it early exits with found = true +func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) { + opts := rest.Opts{ + Method: "GET", + Path: "/Items(" + dirID + ")/Children", + Parameters: url.Values{ + "$select": {api.ListRequestSelect}, + }, + } + + var result api.ListResponse + var resp *http.Response + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) + return shouldRetry(resp, err) + }) + if err != nil { + return found, errors.Wrap(err, "couldn't list files") + } + for i := range result.Value { + item := &result.Value[i] + if item.Type == api.ItemTypeFolder { + if filesOnly { + continue + } + } else if item.Type == api.ItemTypeFile { + if directoriesOnly { + continue + } + } else { + fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type) + continue + } + item.Name = restoreReservedChars(item.Name) + if fn(item) { + found = true + break + } + } + + return +} + +// List the objects and directories in dir into entries. The +// entries can be returned in any order but should be for a +// complete directory. +// +// dir should be "" to list the root, and should not have +// trailing slashes. +// +// This should return ErrDirNotFound if the directory isn't +// found. 
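
[Editor's aside - the listAll callback contract defined above, in use (a sketch): the fn is called once per child item with reserved characters already restored; returning true stops the listing early and makes listAll report found=true, which is how FindLeaf above searches for a single name.

	found, err := f.listAll(ctx, dirID, false, false, func(item *api.Item) bool {
		fmt.Printf("%s (%s)\n", item.Name, item.ID)
		return false // keep going
	})
]
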
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { + err = f.dirCache.FindRoot(ctx, false) + if err != nil { + return nil, err + } + directoryID, err := f.dirCache.FindDir(ctx, dir, false) + if err != nil { + return nil, err + } + var iErr error + _, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool { + remote := path.Join(dir, info.Name) + if info.Type == api.ItemTypeFolder { + // cache the directory ID for later lookups + f.dirCache.Put(remote, info.ID) + d := fs.NewDir(remote, info.CreatedAt).SetID(info.ID).SetSize(info.Size).SetItems(int64(info.FileCount)) + entries = append(entries, d) + } else if info.Type == api.ItemTypeFile { + o, err := f.newObjectWithInfo(ctx, remote, info) + if err != nil { + iErr = err + return true + } + entries = append(entries, o) + } + return false + }) + if err != nil { + return nil, err + } + if iErr != nil { + return nil, iErr + } + return entries, nil +} + +// Creates from the parameters passed in a half finished Object which +// must have setMetaData called on it +// +// Returns the object, leaf, directoryID and error +// +// Used to create new objects +func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) { + // Create the directory for the object if it doesn't exist + leaf, directoryID, err = f.dirCache.FindRootAndPath(ctx, remote, true) + if err != nil { + return + } + // Temporary Object under construction + o = &Object{ + fs: f, + remote: remote, + } + return o, leaf, directoryID, nil +} + +// Put the object +// +// Copy the reader in to the new object which is returned +// +// The new object may have been created if an error is returned +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + existingObj, err := f.newObjectWithInfo(ctx, src.Remote(), nil) + switch err { + case nil: + return existingObj, existingObj.Update(ctx, in, src, options...) + case fs.ErrorObjectNotFound: + // Not found so create it + return f.PutUnchecked(ctx, in, src) + default: + return nil, err + } +} + +// PutStream uploads to the remote path with the modTime given of indeterminate size +func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + return f.Put(ctx, in, src, options...) +} + +// PutUnchecked the object into the container +// +// This will produce an error if the object already exists +// +// Copy the reader in to the new object which is returned +// +// The new object may have been created if an error is returned +func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + remote := src.Remote() + size := src.Size() + modTime := src.ModTime(ctx) + + o, _, _, err := f.createObject(ctx, remote, modTime, size) + if err != nil { + return nil, err + } + return o, o.Update(ctx, in, src, options...) 
+}
+
+// Mkdir creates the container if it doesn't exist
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
+	err := f.dirCache.FindRoot(ctx, true)
+	if err != nil {
+		return err
+	}
+	if dir != "" {
+		_, err = f.dirCache.FindDir(ctx, dir, true)
+	}
+	return err
+}
+
+// purgeCheck removes the directory, if check is set then it refuses
+// to do so if it has anything in it
+func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
+	root := path.Join(f.root, dir)
+	if root == "" {
+		return errors.New("can't purge root directory")
+	}
+	dc := f.dirCache
+	err := dc.FindRoot(ctx, false)
+	if err != nil {
+		return err
+	}
+	rootID, err := dc.FindDir(ctx, dir, false)
+	if err != nil {
+		return err
+	}
+
+	// need to check if empty as it will delete recursively by default
+	if check {
+		found, err := f.listAll(ctx, rootID, false, false, func(item *api.Item) bool {
+			return true
+		})
+		if err != nil {
+			return errors.Wrap(err, "purgeCheck")
+		}
+		if found {
+			return fs.ErrorDirectoryNotEmpty
+		}
+	}
+
+	err = f.remove(ctx, rootID)
+	f.dirCache.FlushDir(dir)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// Rmdir deletes the root folder
+//
+// Returns an error if it isn't empty
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
+	return f.purgeCheck(ctx, dir, true)
+}
+
+// Precision returns the precision of this Fs
+func (f *Fs) Precision() time.Duration {
+	// sharefile returns times accurate to the millisecond, but
+	// for some reason these seem only accurate to 2ms.
+	// updateItem seems to only set times accurate to 1 second though.
+	return time.Second // this doesn't appear to be documented anywhere
+}
+
+// Purge deletes all the files and the container
+//
+// Optional interface: Only implement this if you have a way of
+// deleting all the files quicker than just running Remove() on the
+// result of List()
+func (f *Fs) Purge(ctx context.Context) error {
+	return f.purgeCheck(ctx, "", false)
+}
+
+// updateItem patches a file or folder
+//
+// if leaf = "" or directoryID = "" or modTime == nil then it will be
+// left alone
+//
+// Note that this seems to work by renaming first, then moving to a
+// new directory which means that it can overwrite existing objects
+// :-(
+func (f *Fs) updateItem(ctx context.Context, id, leaf, directoryID string, modTime *time.Time) (info *api.Item, err error) {
+	// Move the object
+	opts := rest.Opts{
+		Method: "PATCH",
+		Path:   "/Items(" + id + ")",
+		Parameters: url.Values{
+			"$select":   {api.ListRequestSelect},
+			"overwrite": {"false"},
+		},
+	}
+	leaf = replaceReservedChars(leaf)
+	// FIXME this appears to be a bug in the API
+	//
+	// If you set the modified time via PATCH then the server
+	// appears to parse it as a local time for America/New_York
+	//
+	// However if you set it when uploading the file then it is fine...
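
[Editor's aside - a worked example of the correction this comment describes, as set up in the code just below. Say the wanted mtime is 2019-06-01T12:00:00Z:

	want:                     2019-06-01T12:00:00Z
	.In(America/New_York):    2019-06-01T08:00:00-04:00 (EDT)
	chop offset, relabel UTC: 2019-06-01T08:00:00Z      <- what gets sent
	server misparses it as NY local time: 08:00 EDT == 12:00Z, the intended instant
]
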
+ // + // Also it only sets the time to 1 second resolution where it + // uses 1ms resolution elsewhere + if modTime != nil && f.location != nil { + newTime := modTime.In(f.location) + isoTime := newTime.Format(time.RFC3339Nano) + // Chop TZ -05:00 off the end and replace with Z + isoTime = isoTime[:len(isoTime)-6] + "Z" + // Parse it back into a time + newModTime, err := time.Parse(time.RFC3339Nano, isoTime) + if err != nil { + return nil, errors.Wrap(err, "updateItem: time parse") + } + modTime = &newModTime + } + update := api.UpdateItemRequest{ + Name: leaf, + FileName: leaf, + ModifiedAt: modTime, + } + if directoryID != "" { + update.Parent = &api.Parent{ + ID: directoryID, + } + } + var resp *http.Response + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(ctx, &opts, &update, &info) + return shouldRetry(resp, err) + }) + if err != nil { + return nil, err + } + return info, nil +} + +// move a file or folder +// +// This is complicated by the fact that we can't use updateItem to move +// to a different directory AND rename at the same time as it can +// overwrite files in the source directory. +func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDirectoryID, newDirectoryID string) (item *api.Item, err error) { + // To demonstrate bug + // item, err = f.updateItem(ctx, id, newLeaf, newDirectoryID, nil) + // if err != nil { + // return nil, errors.Wrap(err, "Move rename leaf") + // } + // return item, nil + doRenameLeaf := oldLeaf != newLeaf + doMove := oldDirectoryID != newDirectoryID + + // Now rename the leaf to a temporary name if we are moving to + // another directory to make sure we don't overwrite something + // in the source directory by accident + if doRenameLeaf && doMove { + tmpLeaf := newLeaf + "." + random.String(8) + item, err = f.updateItem(ctx, id, tmpLeaf, "", nil) + if err != nil { + return nil, errors.Wrap(err, "Move rename leaf") + } + } + + // Move the object to a new directory (with the existing name) + // if required + if doMove { + item, err = f.updateItem(ctx, id, "", newDirectoryID, nil) + if err != nil { + return nil, errors.Wrap(err, "Move directory") + } + } + + // Rename the leaf to its final name if required + if doRenameLeaf { + item, err = f.updateItem(ctx, id, newLeaf, "", nil) + if err != nil { + return nil, errors.Wrap(err, "Move rename leaf") + } + } + + return item, nil +} + +// Move src to this remote using server side move operations. 
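
[Editor's aside - a concrete walk-through of the three updateItem calls in move above, for mv a/x -> b/y when some other y is already present in a (the random suffix is made up for illustration; random.String(8) generates it):

	naive rename+move: server renames first, so a/x -> a/y clobbers the existing a/y
	step 1: a/x -> a/y.x7k2qw9z   (random temp leaf - nothing to collide with)
	step 2: a/y.x7k2qw9z -> b/y.x7k2qw9z   (move, leaf unchanged)
	step 3: b/y.x7k2qw9z -> b/y   (final rename in the destination)
]
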
+// +// This is stored with the remote path given +// +// It returns the destination Object and a possible error +// +// Will only be called if src.Fs().Name() == f.Name() +// +// If it isn't possible then return fs.ErrorCantMove +func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { + srcObj, ok := src.(*Object) + if !ok { + fs.Debugf(src, "Can't move - not same remote type") + return nil, fs.ErrorCantMove + } + + // Find ID of src parent, not creating subdirs + srcLeaf, srcParentID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false) + if err != nil { + return nil, err + } + + // Create temporary object + dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size) + if err != nil { + return nil, err + } + + // Do the move + info, err := f.move(ctx, true, srcObj.id, srcLeaf, leaf, srcParentID, directoryID) + if err != nil { + return nil, err + } + + err = dstObj.setMetaData(info) + if err != nil { + return nil, err + } + return dstObj, nil +} + +// DirMove moves src, srcRemote to this remote at dstRemote +// using server side move operations. +// +// Will only be called if src.Fs().Name() == f.Name() +// +// If it isn't possible then return fs.ErrorCantDirMove +// +// If destination exists then return fs.ErrorDirExists +func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { + srcFs, ok := src.(*Fs) + if !ok { + fs.Debugf(srcFs, "Can't move directory - not same remote type") + return fs.ErrorCantDirMove + } + srcPath := path.Join(srcFs.root, srcRemote) + dstPath := path.Join(f.root, dstRemote) + + // Refuse to move to or from the root + if srcPath == "" || dstPath == "" { + fs.Debugf(src, "DirMove error: Can't move root") + return errors.New("can't move root directory") + } + + // find the root src directory + err := srcFs.dirCache.FindRoot(ctx, false) + if err != nil { + return err + } + + // find the root dst directory + if dstRemote != "" { + err = f.dirCache.FindRoot(ctx, true) + if err != nil { + return err + } + } else { + if f.dirCache.FoundRoot() { + return fs.ErrorDirExists + } + } + + // Find ID of dst parent, creating subdirs if necessary + var leaf, directoryID string + findPath := dstRemote + if dstRemote == "" { + findPath = f.root + } + leaf, directoryID, err = f.dirCache.FindPath(ctx, findPath, true) + if err != nil { + return err + } + + // Check destination does not exist + if dstRemote != "" { + _, err = f.dirCache.FindDir(ctx, dstRemote, false) + if err == fs.ErrorDirNotFound { + // OK + } else if err != nil { + return err + } else { + return fs.ErrorDirExists + } + } + + // Find ID of src + srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false) + if err != nil { + return err + } + + // Find ID of src parent, not creating subdirs + var srcLeaf, srcDirectoryID string + findPath = srcRemote + if srcRemote == "" { + findPath = srcFs.root + } + srcLeaf, srcDirectoryID, err = srcFs.dirCache.FindPath(ctx, findPath, false) + if err != nil { + return err + } + + // Do the move + _, err = f.move(ctx, false, srcID, srcLeaf, leaf, srcDirectoryID, directoryID) + if err != nil { + return err + } + srcFs.dirCache.FlushDir(srcRemote) + return nil +} + +// Copy src to this remote using server side copy operations. 
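
[Editor's aside - the same overwrite hazard drives the shape of Copy below (see the NOTES at the top of this file: the Copy API call cannot set a new name). A sketch of the two paths for cp a/x -> b/y:

	if b/x does not already exist: copy a/x into b (server keeps the name x), then rename b/x -> b/y
	if b/x exists: copy a/x into a fresh rclone-temp-dir-... directory, rename/move the copy to b/y from there, then remove the temp dir
]
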
+//
+// This is stored with the remote path given
+//
+// It returns the destination Object and a possible error
+//
+// Will only be called if src.Fs().Name() == f.Name()
+//
+// If it isn't possible then return fs.ErrorCantCopy
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
+	srcObj, ok := src.(*Object)
+	if !ok {
+		fs.Debugf(src, "Can't copy - not same remote type")
+		return nil, fs.ErrorCantCopy
+	}
+
+	err = srcObj.readMetaData(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	// Find ID of src parent, not creating subdirs
+	srcLeaf, srcParentID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
+	if err != nil {
+		return nil, err
+	}
+	srcLeaf = replaceReservedChars(srcLeaf)
+	_ = srcParentID
+
+	// Create temporary object
+	dstObj, dstLeaf, dstParentID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
+	if err != nil {
+		return nil, err
+	}
+	dstLeaf = replaceReservedChars(dstLeaf)
+
+	sameName := strings.ToLower(srcLeaf) == strings.ToLower(dstLeaf)
+	if sameName && srcParentID == dstParentID {
+		return nil, errors.Errorf("copy: can't copy to a file in the same directory whose name only differs in case: %q vs %q", srcLeaf, dstLeaf)
+	}
+
+	// Discover whether we can just copy directly or not
+	directCopy := false
+	if sameName {
+		// if copying to same name can copy directly
+		directCopy = true
+	} else {
+		// if (dstParentID, srcLeaf) does not exist then can
+		// Copy then Rename without fear of overwriting
+		// something
+		_, err := f.readMetaDataForIDPath(ctx, dstParentID, srcLeaf, false, false)
+		if err == fs.ErrorObjectNotFound || err == fs.ErrorDirNotFound {
+			directCopy = true
+		} else if err != nil {
+			return nil, errors.Wrap(err, "copy: failed to examine destination dir")
+		} else {
+			// otherwise need to copy via a temporary directory
+		}
+	}
+
+	// Copy direct to destination unless !directCopy in which case
+	// copy via a temporary directory
+	copyTargetDirID := dstParentID
+	if !directCopy {
+		// Create a temporary directory to copy the object in to
+		tmpDir := "rclone-temp-dir-" + random.String(16)
+		err = f.Mkdir(ctx, tmpDir)
+		if err != nil {
+			return nil, errors.Wrap(err, "copy: failed to make temp dir")
+		}
+		defer func() {
+			rmdirErr := f.Rmdir(ctx, tmpDir)
+			if rmdirErr != nil && err == nil {
+				err = errors.Wrap(rmdirErr, "copy: failed to remove temp dir")
+			}
+		}()
+		tmpDirID, err := f.dirCache.FindDir(ctx, tmpDir, false)
+		if err != nil {
+			return nil, errors.Wrap(err, "copy: failed to find temp dir")
+		}
+		copyTargetDirID = tmpDirID
+	}
+
+	// Copy the object
+	opts := rest.Opts{
+		Method: "POST",
+		Path:   "/Items(" + srcObj.id + ")/Copy",
+		Parameters: url.Values{
+			"$select":   {api.ListRequestSelect},
+			"overwrite": {"false"},
+			"targetid":  {copyTargetDirID},
+		},
+	}
+	var resp *http.Response
+	var info *api.Item
+	err = f.pacer.Call(func() (bool, error) {
+		resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
+		return shouldRetry(resp, err)
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// Rename into the correct name and directory if required and
+	// set the modtime since the copy doesn't preserve it
+	var updateParentID, updateLeaf string // only set these if necessary
+	if srcLeaf != dstLeaf {
+		updateLeaf = dstLeaf
+	}
+	if !directCopy {
+		updateParentID = dstParentID
+	}
+	// set new modtime regardless
+	info, err = f.updateItem(ctx, info.ID, updateLeaf, updateParentID, &srcObj.modTime)
+	if err != nil {
+		return nil, err
+	}
+	err = dstObj.setMetaData(info)
+	if err != nil {
+		return nil, err
+	}
+	return dstObj, nil
+}
+
+// DirCacheFlush resets the directory cache - used in testing as an
+// optional interface
+func (f *Fs) DirCacheFlush() {
+	f.dirCache.ResetRoot()
+}
+
+// Hashes returns the supported hash sets.
+func (f *Fs) Hashes() hash.Set {
+	return hash.Set(hash.MD5)
+}
+
+// ------------------------------------------------------------
+
+// Fs returns the parent Fs
+func (o *Object) Fs() fs.Info {
+	return o.fs
+}
+
+// Return a string version
+func (o *Object) String() string {
+	if o == nil {
+		return "<nil>"
+	}
+	return o.remote
+}
+
+// Remote returns the remote path
+func (o *Object) Remote() string {
+	return o.remote
+}
+
+// Hash returns the MD5 of an object returning a lowercase hex string
+func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
+	if t != hash.MD5 {
+		return "", hash.ErrUnsupported
+	}
+	err := o.readMetaData(ctx)
+	if err != nil {
+		return "", err
+	}
+	return o.md5, nil
+}
+
+// Size returns the size of an object in bytes
+func (o *Object) Size() int64 {
+	err := o.readMetaData(context.TODO())
+	if err != nil {
+		fs.Logf(o, "Failed to read metadata: %v", err)
+		return 0
+	}
+	return o.size
+}
+
+// setMetaData sets the metadata from info
+func (o *Object) setMetaData(info *api.Item) (err error) {
+	if info.Type != api.ItemTypeFile {
+		return errors.Wrapf(fs.ErrorNotAFile, "%q is %q", o.remote, info.Type)
+	}
+	o.hasMetaData = true
+	o.size = info.Size
+	if !info.ModifiedAt.IsZero() {
+		o.modTime = info.ModifiedAt
+	} else {
+		o.modTime = info.CreatedAt
+	}
+	o.id = info.ID
+	o.md5 = info.Hash
+	return nil
+}
+
+// readMetaData gets the metadata if it hasn't already been fetched
+//
+// it also sets the info
+func (o *Object) readMetaData(ctx context.Context) (err error) {
+	if o.hasMetaData {
+		return nil
+	}
+	var info *api.Item
+	if o.id != "" {
+		info, err = o.fs.readMetaDataForID(ctx, o.id, false, true)
+	} else {
+		info, err = o.fs.readMetaDataForPath(ctx, o.remote, false, true)
+	}
+	if err != nil {
+		return err
+	}
+	return o.setMetaData(info)
+}
+
+// ModTime returns the modification time of the object
+//
+// It attempts to read the object's mtime and if that isn't present the
+// LastModified returned in the http headers
+func (o *Object) ModTime(ctx context.Context) time.Time {
+	err := o.readMetaData(ctx)
+	if err != nil {
+		fs.Logf(o, "Failed to read metadata: %v", err)
+		return time.Now()
+	}
+	return o.modTime
+}
+
+// SetModTime sets the modification time of the local fs object
+func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
+	info, err := o.fs.updateItem(ctx, o.id, "", "", &modTime)
+	if err != nil {
+		return err
+	}
+	err = o.setMetaData(info)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// Storable returns a boolean showing whether this object is storable
+func (o *Object) Storable() bool {
+	return true
+}
+
+// Open an object for read
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
+	opts := rest.Opts{
+		Method: "GET",
+		Path:   "/Items(" + o.id + ")/Download",
+		Parameters: url.Values{
+			"redirect": {"false"},
+		},
+	}
+	var resp *http.Response
+	var dl api.DownloadSpecification
+	err = o.fs.pacer.Call(func() (bool, error) {
+		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &dl)
+		return shouldRetry(resp, err)
+	})
+	if err != nil {
+		return nil, errors.Wrap(err, "open: fetch download specification")
+	}
+
+	fs.FixRangeOption(options, o.size)
+	opts = rest.Opts{
+		Path:    "",
+		RootURL: 
dl.URL, + Method: "GET", + Options: options, + } + err = o.fs.pacer.Call(func() (bool, error) { + resp, err = o.fs.srv.Call(ctx, &opts) + return shouldRetry(resp, err) + }) + if err != nil { + return nil, errors.Wrap(err, "open") + } + return resp.Body, err +} + +// Update the object with the contents of the io.Reader, modTime and size +// +// If existing is set then it updates the object rather than creating a new one +// +// The new object may have been created if an error is returned +func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { + remote := o.Remote() + size := src.Size() + modTime := src.ModTime(ctx) + isLargeFile := size < 0 || size > int64(o.fs.opt.UploadCutoff) + + // Create the directory for the object if it doesn't exist + leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, remote, true) + if err != nil { + return err + } + leaf = replaceReservedChars(leaf) + var req = api.UploadRequest{ + Method: "standard", + Raw: true, + Filename: leaf, + Overwrite: true, + CreatedDate: modTime, + ModifiedDate: modTime, + Tool: fs.Config.UserAgent, + } + + if isLargeFile { + if size < 0 { + // For files of indeterminate size, use streamed + req.Method = "streamed" + } else { + // otherwise use threaded which is more efficient + req.Method = "threaded" + req.ThreadCount = &fs.Config.Transfers + req.Filesize = &size + } + } + + var resp *http.Response + var info api.UploadSpecification + opts := rest.Opts{ + Method: "POST", + Path: "/Items(" + directoryID + ")/Upload2", + } + err = o.fs.pacer.Call(func() (bool, error) { + resp, err = o.fs.srv.CallJSON(ctx, &opts, &req, &info) + return shouldRetry(resp, err) + }) + if err != nil { + return errors.Wrap(err, "upload get specification") + } + + // If file is large then upload in parts + if isLargeFile { + up, err := o.fs.newLargeUpload(ctx, o, in, src, &info) + if err != nil { + return err + } + return up.Upload(ctx) + } + + // Single part upload + opts = rest.Opts{ + Method: "POST", + RootURL: info.ChunkURI + "&fmt=json", + Body: in, + ContentLength: &size, + } + var finish api.UploadFinishResponse + err = o.fs.pacer.CallNoRetry(func() (bool, error) { + resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &finish) + return shouldRetry(resp, err) + }) + if err != nil { + return errors.Wrap(err, "upload file") + } + return o.checkUploadResponse(ctx, &finish) +} + +// Check the upload response and update the metadata on the object +func (o *Object) checkUploadResponse(ctx context.Context, finish *api.UploadFinishResponse) (err error) { + // Find returned ID + id, err := finish.ID() + if err != nil { + return err + } + + // Read metadata + o.id = id + o.hasMetaData = false + return o.readMetaData(ctx) +} + +// Remove an object by ID +func (f *Fs) remove(ctx context.Context, id string) (err error) { + opts := rest.Opts{ + Method: "DELETE", + Path: "/Items(" + id + ")", + Parameters: url.Values{ + "singleversion": {"false"}, + "forceSync": {"true"}, + }, + NoResponse: true, + } + var resp *http.Response + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.Call(ctx, &opts) + return shouldRetry(resp, err) + }) + if err != nil { + return errors.Wrap(err, "remove") + } + return nil +} + +// Remove an object +func (o *Object) Remove(ctx context.Context) error { + err := o.readMetaData(ctx) + if err != nil { + return errors.Wrap(err, "Remove: Failed to read metadata") + } + return o.fs.remove(ctx, o.id) +} + +// ID returns the ID of the Object if known, or "" if not +func (o 
*Object) ID() string { + return o.id +} + +// Check the interfaces are satisfied +var ( + _ fs.Fs = (*Fs)(nil) + _ fs.Purger = (*Fs)(nil) + _ fs.Mover = (*Fs)(nil) + _ fs.DirMover = (*Fs)(nil) + _ fs.Copier = (*Fs)(nil) + _ fs.PutStreamer = (*Fs)(nil) + _ fs.DirCacheFlusher = (*Fs)(nil) + _ fs.Object = (*Object)(nil) + _ fs.IDer = (*Object)(nil) +) diff --git a/backend/sharefile/sharefile_test.go b/backend/sharefile/sharefile_test.go new file mode 100644 index 000000000..0004610a6 --- /dev/null +++ b/backend/sharefile/sharefile_test.go @@ -0,0 +1,34 @@ +// Test filesystem interface +package sharefile + +import ( + "testing" + + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fstest/fstests" +) + +// TestIntegration runs integration tests against the remote +func TestIntegration(t *testing.T) { + fstests.Run(t, &fstests.Opt{ + RemoteName: "TestSharefile:", + NilObject: (*Object)(nil), + ChunkedUpload: fstests.ChunkedUploadConfig{ + MinChunkSize: minChunkSize, + CeilChunkSize: fstests.NextPowerOfTwo, + }, + }) +} + +func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) { + return f.setUploadChunkSize(cs) +} + +func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) { + return f.setUploadCutoff(cs) +} + +var ( + _ fstests.SetUploadChunkSizer = (*Fs)(nil) + _ fstests.SetUploadCutoffer = (*Fs)(nil) +) diff --git a/backend/sharefile/tzdata_vfsdata.go b/backend/sharefile/tzdata_vfsdata.go new file mode 100644 index 000000000..b016f14a5 --- /dev/null +++ b/backend/sharefile/tzdata_vfsdata.go @@ -0,0 +1,193 @@ +// Code generated by vfsgen; DO NOT EDIT. + +// +build !dev + +package sharefile + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + pathpkg "path" + "time" +) + +// tzdata statically implements the virtual filesystem provided to vfsgen. 
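
[Editor's aside: the file below is the output of backend/sharefile/generate_tzdata.go shown earlier, produced via the go:generate hook (./update-timezone.sh) in sharefile.go. It embeds tzdata/America/New_York so NewFs can load the server's timezone with time.LoadLocationFromTZData even on systems that ship no tz database; the "!dev" build tag presumably pairs it with a dev-mode file that reads ./tzdata from disk instead (not shown in this diff).]
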
+var tzdata = func() http.FileSystem { + fs := vfsgen۰FS{ + "/": &vfsgen۰DirInfo{ + name: "/", + modTime: time.Date(2019, 9, 12, 14, 55, 27, 600751842, time.UTC), + }, + "/America": &vfsgen۰DirInfo{ + name: "America", + modTime: time.Date(2019, 9, 12, 14, 55, 27, 600751842, time.UTC), + }, + "/America/New_York": &vfsgen۰CompressedFileInfo{ + name: "New_York", + modTime: time.Date(2019, 7, 2, 0, 44, 57, 0, time.UTC), + uncompressedSize: 3536, + + compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\xd6\x7f\x54\xd5\xf5\x1d\xc7\xf1\xaf\x4a\x6a\x28\xa1\x2b\x9a\xa6\xb1\xa6\xdb\x08\x13\xf0\x47\xe4\x2f\xb6\x68\xc9\x18\xda\x0d\xd3\x4b\x22\x39\xfc\xd4\x20\x0e\xea\xc6\x67\xfd\x20\xdc\x0c\xdb\xb4\x18\x9e\xd3\xdc\x4e\x1e\xd8\x56\xe7\x36\x53\x94\x5c\x44\x0a\x38\xc9\x76\xfd\xc5\xc8\xe3\x8e\xab\xb8\x66\xd8\x8f\xeb\x5b\xa7\x77\x65\x92\x1f\xa4\x8c\xc9\xba\x77\xe7\xf9\x9e\xff\xed\x9f\xf9\xdf\xfe\x48\x4f\x3e\xfc\x9c\x13\xf0\x8f\xf7\xf5\x7c\xfb\x8b\xca\x1f\x9c\xe6\xfd\xd7\xaf\xab\x2e\xff\xc7\xaf\x73\x97\xff\x7e\xdd\x13\x9e\xe7\x05\xb6\x26\xdb\xe7\x5f\xfd\xd8\xfc\xe1\x29\xcf\x6e\xfa\xfd\x11\xf3\x42\xe9\x29\xbb\x79\xed\x47\xb2\x65\xf9\xcb\xb6\x21\x73\x9b\xd9\xba\xe8\xb0\xdb\x96\x54\x6b\x1a\xa7\xbf\xe4\x1a\xa3\x0d\xb2\xfd\xda\x5f\xb9\xed\xe1\x1a\xf9\x63\x9f\x75\x2f\x05\xcb\xa5\x29\xb4\xd0\xbd\x1c\x98\x2f\xcd\x2d\xb7\xba\x57\xaa\xd3\x64\xc7\x73\xf7\xd8\x9d\x65\xf3\x4c\xcb\xea\xe9\xb6\x35\x77\xb2\x69\x5b\x9a\x64\x77\xa5\x5c\x63\xfe\x34\xe7\x73\xbb\x7b\xa8\x33\xed\xe3\x8e\xdb\xf6\x48\x97\xd9\x13\xf7\x99\xdb\xd3\xd9\x6a\x5e\x3b\xfd\x8e\xfb\x73\xf3\x9b\x12\xec\x68\x77\x7b\x37\xec\x94\x7d\x5b\x9e\x75\xfb\x2b\x36\xca\x81\x75\x8f\xbb\x83\xf9\x95\xd2\x51\xb2\xcc\xfd\x25\xa3\x50\x3a\x7d\xab\xed\xeb\x89\xb3\xe5\x50\x5a\xb1\x3d\xd4\xbf\xd8\x1c\x4e\xc8\xb6\x87\xbb\x67\x99\xbf\xfe\xd2\xd9\xae\x89\x9f\xda\x2e\x33\x20\xa1\x47\x4f\xbb\xa3\xd9\x1f\xc8\xdb\x05\x9d\xee\xd8\x4d\x7b\xe5\x9d\xcc\x46\xd7\xed\x6d\x92\xe3\x49\xeb\xdd\x71\x59\x2b\xef\x46\xb7\xd9\xf7\xf6\x95\xca\xfb\xe1\x5a\xfb\xc1\x8b\xbf\x30\xe1\xe0\x0a\x7b\xa2\xb6\xc4\x48\xc0\x67\x4f\x96\x7f\xcf\x9c\xaa\xce\xb0\x7f\xcf\xbb\xd9\x9c\x2e\x1e\x6d\xcf\x2c\x4e\x97\x48\x6e\x9a\xfb\xc7\x8c\x51\xf2\x61\x4a\xa2\xfb\xe8\xfa\x0b\x72\x76\x68\xaf\x3b\x7b\xf1\xa8\x7c\x1c\x09\xb9\x73\xc7\x76\x49\x4f\x67\x9b\xfb\x64\x6f\xc8\x9c\x6f\xee\xb2\xee\xf9\x36\xd3\xbb\xa1\xd5\x5e\x58\x53\x6f\xfa\x2a\xea\xec\xa7\xcb\x56\x99\xcf\xf2\xab\xec\xc5\xdb\xef\x33\x9f\x67\x14\xd9\xfe\x9b\x1f\x93\x7f\x26\x66\xd9\x4b\xc3\x97\xc8\xa5\xfe\x42\x37\xf0\xe1\x1c\xf9\x57\xf7\x6c\xf7\xc5\xa1\x1b\x25\xba\x7b\xbc\x8b\x6d\x8b\x89\x57\x1f\x75\x83\x6a\x4e\xca\xe0\xc7\xc4\x0d\xb1\x51\x13\x67\xbe\xb0\x57\x2d\x10\x33\x34\xfb\x84\x1d\x36\xe5\x80\x19\xf6\xf4\x58\x7b\xf5\xa8\x2d\xe6\xea\xa6\x8d\x2e\xde\x3d\x65\x46\xc8\x93\x76\xe4\xf1\x17\x24\x61\x5f\x99\xbd\xa6\x7d\x9d\x24\xbe\xb8\xd6\x8d\xfa\xdd\x83\x32\xba\xb6\xd4\x7d\x65\xd5\xf7\xe5\xda\xf2\x5c\x77\xdd\x92\x49\x92\x94\x97\xea\xae\x9f\x35\x52\xbe\x9a\x3a\xc2\x8d\x99\x90\x6a\xc6\x0e\xef\x71\x37\x0c\x1e\x61\x6e\xb8\x10\x6f\xc7\x9d\xec\x31\xe3\xdf\x3a\x67\x6f\xdc\xff\x86\x49\xde\xf1\x37\xfb\xb5\x4d\x3b\xcc\x4d\x95\x9e\xfb\xfa\xcf\x9f\x31\x13\x0a\x4e\xb9\x89\xcb\x9b\xe5\x1b\x99\x1d\xee\x9b\xf3\x7e\x23\xdf\x4a\xda\xea\x52\x26\x3d\x2c\x29\xd1\x83\x36\x35\xbe\x40\x52\xc3\x0d\x76\xd2\xd9\x19\x72\x4b\xb0\xc6\x4e\x0e\xf9\x4d\x5a\xa0\xdc\xa6\xb7\xdc\x66\x32\xaa\xe7\xdb\x29\xcf\x8c\x31\x53\xcb\xca\xdc\xb4\x87\x2e\x99\xe9\xb9\x79\xee\xd6\x85\xef\x9b\xcc\x94\x5b\xdc\x6d\xd3\x82\x66\xc6\xb0\x04\x37\x73\xdc\xbb\x32\x33\x72\xde\xcd\x1a\x78\x4d\x66\x77\xbe\xe5\xe6\xbc\x17\x
90\xac\xe6\x4f\xec\xb7\xf7\x3c\x21\xdf\xd9\xf0\xa6\xbd\xfd\xd9\x07\x24\xbb\x62\xa7\xbd\x63\xdd\x1a\xf3\xdd\xfc\x8d\xf6\xce\x92\xfb\xcd\xdc\x8c\x4a\x9b\x33\xf7\x4e\x93\xd3\xd7\xe8\x72\xd3\x96\x49\x6e\x68\xbd\xcb\x4b\xb8\x43\xf2\x5a\x56\xba\x79\x3d\x13\x65\xfe\x73\xb5\xf6\xae\x63\xd9\xc6\xb7\x7a\x85\xbd\x7b\xd7\x04\x93\xbf\xd4\x67\x17\xd4\xc5\x99\x7b\xb2\x32\xec\xc2\x47\x23\x66\xd1\xf8\xd1\xd6\x5f\x70\xc8\xf8\x07\xfa\xec\xbd\x99\xdb\xcd\xbd\x67\x12\x5d\x61\x72\xa7\x14\x76\xf4\xba\x25\xd1\x46\x29\xda\x12\x72\xf7\x85\xd7\xcb\xd2\x75\x6d\xee\x07\xc1\x95\x52\x5c\x52\xef\x96\x05\xee\x16\xe3\x6b\xb5\xf7\xd7\xac\x30\x0f\xa4\xd5\xd9\x1f\x96\xf9\x4c\x49\x42\x95\x2d\xcd\xcd\x30\xa5\x3d\x45\xb6\x2c\x65\xb4\x29\x3b\x92\x65\xcb\x87\xf6\x99\xf2\xa6\x64\xbb\x3c\xf2\xb6\x59\x51\x37\xdb\xad\x7c\xa3\x57\x7e\x54\x39\xde\xfd\xb8\x39\x24\x15\x05\x51\x67\x37\xb4\xc9\x4f\x32\xc5\x3d\x54\x51\x2f\x0f\x27\x1d\x70\x8f\xe4\xaf\x92\x47\xa2\x27\x6c\xe5\xcc\x3a\x53\x19\xde\x6f\xab\xc6\x54\x99\xaa\xe0\x66\xbb\xaa\xbf\xc8\xfc\x34\xf0\xa4\xfd\x59\x77\x96\x59\x5d\x5d\x66\x1f\xdf\x9d\x6c\xaa\x8b\xf3\xec\x9a\xdf\x7a\x66\xf0\xa0\x2b\xfc\x3d\x24\xee\x8a\xbf\xe4\xff\xe5\x77\x2c\xf6\x6a\xc0\xf3\x62\xb1\xd7\xf7\x0d\x8a\x8b\xc5\xda\x5f\xf1\x86\xeb\xdf\x47\xea\x9f\xa3\xee\xf2\xf9\xbd\x9c\xb9\x7e\x2f\x67\x91\xdf\xcb\x59\xec\xf7\x72\x16\xf8\x75\xda\x06\xe9\x1f\x57\xb2\x81\xb1\x58\x2c\x56\x3c\xc4\xfd\x1a\xd9\x42\x64\x0f\x91\x4d\x44\x76\x11\xd9\x46\x64\x1f\x91\x8d\x44\x76\x12\xd9\x4a\x64\x2f\x91\xcd\x54\xa3\x0d\xfa\xff\xb3\x9d\x6a\xb8\x46\xdf\x6c\x28\xb2\xa3\xc8\x96\x22\x7b\x8a\x6c\x2a\xb2\xab\xc8\xb6\x22\xfb\x8a\x6c\x2c\xb2\xb3\xc8\xd6\x22\x7b\x8b\x6c\x2e\xb2\xbb\xc8\xf6\xaa\x91\x2e\x7d\xb3\xc1\x6a\x67\xab\xbe\xd9\x62\x64\x8f\x91\x4d\x46\x76\x19\xd9\x66\x64\x9f\x91\x8d\x46\x76\x1a\xd9\x6a\x64\xaf\x91\xcd\x46\x76\x1b\xd9\x6e\xb5\x7f\xb1\xfe\x3c\x36\x5c\xed\x9e\xa5\x6f\xb6\x1c\xd9\x73\xd5\x0c\xe8\xd7\xb1\xeb\xc8\xb6\x23\xfb\x8e\x6c\x3c\xb2\xf3\xc8\xd6\x23\x7b\xaf\xca\x5a\x7d\xb3\xfb\xc8\xf6\x23\xfb\x8f\x34\x00\xe9\x00\xd2\x02\xa4\x07\x48\x13\x90\x2e\x20\x6d\x40\xfa\x80\x34\x02\xe9\x04\xd2\x0a\xa4\x17\x48\x33\x90\x6e\xa8\x17\x8f\xea\x9b\x7e\x20\x0d\x41\x3a\x82\xb4\x04\xe9\x09\xd2\x14\xa4\x2b\x48\x5b\x90\xbe\x20\x8d\x41\x3a\x83\xb4\x06\xe9\x0d\xd2\x1c\xa4\x3b\x48\x7b\xd4\xfe\x42\xfd\x79\x34\x08\xe9\x10\xd2\x22\xd4\x1e\xe9\x3f\xe4\x98\xe8\xa7\xa5\x3e\xea\xf4\x83\x55\x73\x52\xdf\xf4\x09\x69\x14\xd2\x29\xfd\x80\x2d\x10\x7d\xd3\x2b\xa4\x59\xea\xd3\x63\xf5\x4d\xbb\xd4\xa6\x8d\xfa\xf5\x34\x0c\xe9\x18\xd2\x32\xa4\x67\x48\xd3\x90\xae\x21\x6d\x43\xfa\x86\x34\x0e\xe9\x1c\xd2\x3a\xa4\x77\x48\xf3\x90\xee\x21\xed\x43\xfa\x87\x34\x50\xbd\x10\xaf\x3f\x8f\x16\x22\x3d\x44\x9a\x88\x74\x11\x69\x23\xd2\x47\xa4\x91\x48\x27\x91\x56\x22\xbd\x44\x9a\x89\x74\x13\x69\xa7\x1a\x3d\xa8\xdf\x8f\x86\xaa\xe1\x06\x7d\xd3\x52\xa4\xa7\x48\x53\x91\xae\x22\x6d\x45\xfa\x8a\x34\x16\xe9\x2c\xd2\x5a\xa4\xb7\x48\x73\x91\xee\x22\xed\x45\xfa\x8b\x34\x58\x8d\x9c\xd7\x37\x2d\x46\x7a\x8c\x34\x19\xe9\x32\xd2\x66\xa4\xcf\x48\xa3\x91\x4e\x23\xad\x46\x7a\x8d\x34\x1b\xe9\x36\xd2\x6e\xb5\xaf\x51\xbf\x3f\x0d\x57\x43\xeb\xf5\x4d\xcb\xd5\x96\x95\xfa\xa6\xe9\x48\xd7\x91\xb6\x23\x7d\x47\x1a\x8f\x74\x1e\x69\x3d\xd2\x7b\xa4\xf9\x48\xf7\x91\xf6\xab\x03\x7d\xfa\xe6\x06\x50\xcf\x24\xea\xcf\xe3\x16\x50\x3b\x7a\xf5\xcd\x4d\x80\xdc\x05\xc8\x6d\x80\xdc\x07\xc8\x8d\x80\xdc\x09\xc8\xad\x80\xdc\x0b\xc8\xcd\x80\xdc\x0d\xc8\xed\x80\xdc\x0f\xc8\x0d\xa1\xf6\x14\xe9\x9b\x5b\x42\x3d\x92\xa5\x6f\x6e\x0a\xb5\x29\x59\xdf\xdc\x16\xc8\x7d\x81\xdc\x18\xc8\x9d\x81\xdc\x1a\xc8\xbd\x81\xdc\x1c\xc8\xdd\x81\xdc\x1e\xc8\xfd\x81\
xdc\x20\x6a\xf4\x3f\x9f\x57\x6e\x11\x35\xbc\x5f\xdf\xdc\x24\x6a\x70\xb3\xbe\xb9\x4d\x90\xfb\x04\xb9\x51\x90\x3b\x05\xb9\x55\x90\x7b\x05\xbf\xbc\x59\xfe\xf7\x9b\x25\x3e\x67\x91\x3f\x33\x67\xae\x7f\xb2\x6f\x7a\xfa\xb4\xf4\x29\x93\x7d\x53\xa7\xa6\x4f\x4d\x9f\x12\xff\xef\x00\x00\x00\xff\xff\x96\x2d\xbf\x9f\xd0\x0d\x00\x00"), + }, + } + fs["/"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ + fs["/America"].(os.FileInfo), + } + fs["/America"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ + fs["/America/New_York"].(os.FileInfo), + } + + return fs +}() + +type vfsgen۰FS map[string]interface{} + +func (fs vfsgen۰FS) Open(path string) (http.File, error) { + path = pathpkg.Clean("/" + path) + f, ok := fs[path] + if !ok { + return nil, &os.PathError{Op: "open", Path: path, Err: os.ErrNotExist} + } + + switch f := f.(type) { + case *vfsgen۰CompressedFileInfo: + gr, err := gzip.NewReader(bytes.NewReader(f.compressedContent)) + if err != nil { + // This should never happen because we generate the gzip bytes such that they are always valid. + panic("unexpected error reading own gzip compressed bytes: " + err.Error()) + } + return &vfsgen۰CompressedFile{ + vfsgen۰CompressedFileInfo: f, + gr: gr, + }, nil + case *vfsgen۰DirInfo: + return &vfsgen۰Dir{ + vfsgen۰DirInfo: f, + }, nil + default: + // This should never happen because we generate only the above types. + panic(fmt.Sprintf("unexpected type %T", f)) + } +} + +// vfsgen۰CompressedFileInfo is a static definition of a gzip compressed file. +type vfsgen۰CompressedFileInfo struct { + name string + modTime time.Time + compressedContent []byte + uncompressedSize int64 +} + +func (f *vfsgen۰CompressedFileInfo) Readdir(count int) ([]os.FileInfo, error) { + return nil, fmt.Errorf("cannot Readdir from file %s", f.name) +} +func (f *vfsgen۰CompressedFileInfo) Stat() (os.FileInfo, error) { return f, nil } + +func (f *vfsgen۰CompressedFileInfo) GzipBytes() []byte { + return f.compressedContent +} + +func (f *vfsgen۰CompressedFileInfo) Name() string { return f.name } +func (f *vfsgen۰CompressedFileInfo) Size() int64 { return f.uncompressedSize } +func (f *vfsgen۰CompressedFileInfo) Mode() os.FileMode { return 0444 } +func (f *vfsgen۰CompressedFileInfo) ModTime() time.Time { return f.modTime } +func (f *vfsgen۰CompressedFileInfo) IsDir() bool { return false } +func (f *vfsgen۰CompressedFileInfo) Sys() interface{} { return nil } + +// vfsgen۰CompressedFile is an opened compressedFile instance. +type vfsgen۰CompressedFile struct { + *vfsgen۰CompressedFileInfo + gr *gzip.Reader + grPos int64 // Actual gr uncompressed position. + seekPos int64 // Seek uncompressed position. +} + +func (f *vfsgen۰CompressedFile) Read(p []byte) (n int, err error) { + if f.grPos > f.seekPos { + // Rewind to beginning. + err = f.gr.Reset(bytes.NewReader(f.compressedContent)) + if err != nil { + return 0, err + } + f.grPos = 0 + } + if f.grPos < f.seekPos { + // Fast-forward. 
+		_, err = io.CopyN(ioutil.Discard, f.gr, f.seekPos-f.grPos)
+		if err != nil {
+			return 0, err
+		}
+		f.grPos = f.seekPos
+	}
+	n, err = f.gr.Read(p)
+	f.grPos += int64(n)
+	f.seekPos = f.grPos
+	return n, err
+}
+func (f *vfsgen۰CompressedFile) Seek(offset int64, whence int) (int64, error) {
+	switch whence {
+	case io.SeekStart:
+		f.seekPos = 0 + offset
+	case io.SeekCurrent:
+		f.seekPos += offset
+	case io.SeekEnd:
+		f.seekPos = f.uncompressedSize + offset
+	default:
+		panic(fmt.Errorf("invalid whence value: %v", whence))
+	}
+	return f.seekPos, nil
+}
+func (f *vfsgen۰CompressedFile) Close() error {
+	return f.gr.Close()
+}
+
+// vfsgen۰DirInfo is a static definition of a directory.
+type vfsgen۰DirInfo struct {
+	name    string
+	modTime time.Time
+	entries []os.FileInfo
+}
+
+func (d *vfsgen۰DirInfo) Read([]byte) (int, error) {
+	return 0, fmt.Errorf("cannot Read from directory %s", d.name)
+}
+func (d *vfsgen۰DirInfo) Close() error               { return nil }
+func (d *vfsgen۰DirInfo) Stat() (os.FileInfo, error) { return d, nil }
+
+func (d *vfsgen۰DirInfo) Name() string       { return d.name }
+func (d *vfsgen۰DirInfo) Size() int64        { return 0 }
+func (d *vfsgen۰DirInfo) Mode() os.FileMode  { return 0755 | os.ModeDir }
+func (d *vfsgen۰DirInfo) ModTime() time.Time { return d.modTime }
+func (d *vfsgen۰DirInfo) IsDir() bool        { return true }
+func (d *vfsgen۰DirInfo) Sys() interface{}   { return nil }
+
+// vfsgen۰Dir is an opened dir instance.
+type vfsgen۰Dir struct {
+	*vfsgen۰DirInfo
+	pos int // Position within entries for Seek and Readdir.
+}
+
+func (d *vfsgen۰Dir) Seek(offset int64, whence int) (int64, error) {
+	if offset == 0 && whence == io.SeekStart {
+		d.pos = 0
+		return 0, nil
+	}
+	return 0, fmt.Errorf("unsupported Seek in directory %s", d.name)
+}
+
+func (d *vfsgen۰Dir) Readdir(count int) ([]os.FileInfo, error) {
+	if d.pos >= len(d.entries) && count > 0 {
+		return nil, io.EOF
+	}
+	if count <= 0 || count > len(d.entries)-d.pos {
+		count = len(d.entries) - d.pos
+	}
+	e := d.entries[d.pos : d.pos+count]
+	d.pos += count
+	return e, nil
+}
diff --git a/backend/sharefile/update-timezone.sh b/backend/sharefile/update-timezone.sh
new file mode 100755
index 000000000..ae5eecc63
--- /dev/null
+++ b/backend/sharefile/update-timezone.sh
@@ -0,0 +1,18 @@
+#!/bin/bash

+set -e
+
+# Extract just the America/New_York timezone from the Go distribution's zoneinfo.zip
+tzinfo=$(go env GOROOT)/lib/time/zoneinfo.zip
+
+rm -rf tzdata
+mkdir tzdata
+cd tzdata
+unzip ${tzinfo} America/New_York
+
+cd ..
+# Make the embedded assets
+go run generate_tzdata.go
+
+# tidy up
+rm -rf tzdata
diff --git a/backend/sharefile/upload.go b/backend/sharefile/upload.go
new file mode 100644
index 000000000..05906978d
--- /dev/null
+++ b/backend/sharefile/upload.go
@@ -0,0 +1,261 @@
+// Upload large files for sharefile
+//
+// Docs - https://api.sharefile.com/rest/docs/resource.aspx?name=Items#Upload_File
+
+package sharefile
+
+import (
+	"bytes"
+	"context"
+	"crypto/md5"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"io"
+	"strings"
+	"sync"
+
+	"github.com/pkg/errors"
+	"github.com/rclone/rclone/backend/sharefile/api"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/accounting"
+	"github.com/rclone/rclone/lib/readers"
+	"github.com/rclone/rclone/lib/rest"
+)
+
+// largeUpload is used to control the upload of large files which need chunking
+type largeUpload struct {
+	ctx      context.Context
+	f        *Fs                      // parent Fs
+	o        *Object                  // object being uploaded
+	in       io.Reader                // read the data from here
+	wrap     accounting.WrapFn        // account parts being transferred
+	size     int64                    // total size
+	parts    int64                    // calculated number of parts, if known
+	info     *api.UploadSpecification // where to post chunks etc
+	threads  int                      // number of threads to use in upload
+	streamed bool                     // set if using streamed upload
+}
+
+// newLargeUpload starts an upload of object o from in with metadata in src
+func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, info *api.UploadSpecification) (up *largeUpload, err error) {
+	size := src.Size()
+	parts := int64(-1)
+	if size >= 0 {
+		parts = size / int64(o.fs.opt.ChunkSize)
+		if size%int64(o.fs.opt.ChunkSize) != 0 {
+			parts++
+		}
+	}
+
+	var streamed bool
+	switch strings.ToLower(info.Method) {
+	case "streamed":
+		streamed = true
+	case "threaded":
+		streamed = false
+	default:
+		return nil, errors.Errorf("can't use method %q with newLargeUpload", info.Method)
+	}
+
+	threads := fs.Config.Transfers
+	if threads > info.MaxNumberOfThreads {
+		threads = info.MaxNumberOfThreads
+	}
+
+	// unwrap the accounting from the input, we use wrap to put it
+	// back on after the buffering
+	in, wrap := accounting.UnWrap(in)
+	up = &largeUpload{
+		ctx:      ctx,
+		f:        f,
+		o:        o,
+		in:       in,
+		wrap:     wrap,
+		size:     size,
+		threads:  threads,
+		info:     info,
+		parts:    parts,
+		streamed: streamed,
+	}
+	return up, nil
+}
+
+// parse the api.UploadFinishResponse in respBody
+func (up *largeUpload) parseUploadFinishResponse(respBody []byte) (err error) {
+	var finish api.UploadFinishResponse
+	err = json.Unmarshal(respBody, &finish)
+	if err != nil {
+		// Sometimes the unmarshal fails in which case return the body
+		return errors.Errorf("upload: bad response: %q", bytes.TrimSpace(respBody))
+	}
+	return up.o.checkUploadResponse(up.ctx, &finish)
+}
+
+// Transfer a chunk
+func (up *largeUpload) transferChunk(ctx context.Context, part int64, offset int64, body []byte, fileHash string) error {
+	md5sumRaw := md5.Sum(body)
+	md5sum := hex.EncodeToString(md5sumRaw[:])
+	size := int64(len(body))
+
+	// Add some more parameters to the ChunkURI
+	u := up.info.ChunkURI
+	u += fmt.Sprintf("&index=%d&byteOffset=%d&hash=%s&fmt=json",
+		part, offset, md5sum,
+	)
+	if fileHash != "" {
+		u += fmt.Sprintf("&finish=true&fileSize=%d&fileHash=%s",
+			offset+int64(len(body)),
+			fileHash,
+		)
+	}
+	opts := rest.Opts{
+		Method:        "POST",
+		RootURL:       u,
+		ContentLength: &size,
+	}
+	var respBody []byte
+	err := up.f.pacer.Call(func() (bool, error) {
+		fs.Debugf(up.o, "Sending chunk %d length %d", part,
len(body))
+		opts.Body = up.wrap(bytes.NewReader(body))
+		resp, err := up.f.srv.Call(ctx, &opts)
+		if err != nil {
+			fs.Debugf(up.o, "Error sending chunk %d: %v", part, err)
+		} else {
+			respBody, err = rest.ReadBody(resp)
+		}
+		// retry all errors now that the multipart upload has started
+		return err != nil, err
+	})
+	if err != nil {
+		fs.Debugf(up.o, "Error sending chunk %d: %v", part, err)
+		return err
+	}
+	// If last chunk and using "streamed" transfer, get the response back now
+	if up.streamed && fileHash != "" {
+		return up.parseUploadFinishResponse(respBody)
+	}
+	fs.Debugf(up.o, "Done sending chunk %d", part)
+	return nil
+}
+
+// finish closes off the large upload and reads the metadata
+func (up *largeUpload) finish(ctx context.Context) error {
+	fs.Debugf(up.o, "Finishing large file upload")
+	// For a streamed transfer we will already have read the info
+	if up.streamed {
+		return nil
+	}
+
+	opts := rest.Opts{
+		Method:  "POST",
+		RootURL: up.info.FinishURI,
+	}
+	var respBody []byte
+	err := up.f.pacer.Call(func() (bool, error) {
+		resp, err := up.f.srv.Call(ctx, &opts)
+		if err != nil {
+			return shouldRetry(resp, err)
+		}
+		respBody, err = rest.ReadBody(resp)
+		// retry all errors now that the multipart upload has started
+		return err != nil, err
+	})
+	if err != nil {
+		return err
+	}
+	return up.parseUploadFinishResponse(respBody)
+}
+
+// Upload uploads the chunks from the input
+func (up *largeUpload) Upload(ctx context.Context) error {
+	if up.parts >= 0 {
+		fs.Debugf(up.o, "Starting upload of large file in %d chunks", up.parts)
+	} else {
+		fs.Debugf(up.o, "Starting streaming upload of large file")
+	}
+	var (
+		offset        int64
+		errs          = make(chan error, 1)
+		wg            sync.WaitGroup
+		err           error
+		wholeFileHash = md5.New()
+		eof           = false
+	)
+outer:
+	for part := int64(0); !eof; part++ {
+		// Check any errors
+		select {
+		case err = <-errs:
+			break outer
+		default:
+		}
+
+		// Get a block of memory
+		buf := up.f.getUploadBlock()
+
+		// Read the chunk
+		var n int
+		n, err = readers.ReadFill(up.in, buf)
+		if err == io.EOF {
+			eof = true
+			buf = buf[:n]
+			err = nil
+		} else if err != nil {
+			up.f.putUploadBlock(buf)
+			break outer
+		}
+
+		// Hash it
+		_, _ = io.Copy(wholeFileHash, bytes.NewBuffer(buf))
+
+		// Get the file hash if this was the last chunk
+		fileHash := ""
+		if eof {
+			fileHash = hex.EncodeToString(wholeFileHash.Sum(nil))
+		}
+
+		// Transfer the chunk
+		wg.Add(1)
+		transferChunk := func(part, offset int64, buf []byte, fileHash string) {
+			defer wg.Done()
+			defer up.f.putUploadBlock(buf)
+			err := up.transferChunk(ctx, part, offset, buf, fileHash)
+			if err != nil {
+				select {
+				case errs <- err:
+				default:
+				}
+			}
+		}
+		if up.streamed {
+			transferChunk(part, offset, buf, fileHash) // streamed
+		} else {
+			go transferChunk(part, offset, buf, fileHash) // multithreaded
+		}
+
+		offset += int64(n)
+	}
+	wg.Wait()
+
+	// check size read is correct
+	if eof && err == nil && up.size >= 0 && up.size != offset {
+		err = errors.Errorf("upload: short read: read %d bytes expected %d", offset, up.size)
+	}
+
+	// read any errors
+	if err == nil {
+		select {
+		case err = <-errs:
+		default:
+		}
+	}
+
+	// finish regardless of errors
+	finishErr := up.finish(ctx)
+	if err == nil {
+		err = finishErr
+	}
+
+	return err
+}
diff --git a/bin/make_manual.py b/bin/make_manual.py
index 8b35b9f17..a44c3fd05 100755
--- a/bin/make_manual.py
+++ b/bin/make_manual.py
@@ -32,6 +32,7 @@ docs = [
     "box.md",
     "cache.md",
     "chunker.md",
+    "sharefile.md",
     "crypt.md",
     "dropbox.md",
     "ftp.md",
diff --git a/docs/content/about.md b/docs/content/about.md
index be083fb56..dd2283bc3 100644
--- a/docs/content/about.md
+++ b/docs/content/about.md
@@ -17,6 +17,7 @@ Rclone is a command line program to sync files and directories to and from:
 * {{< provider name="Backblaze B2" home="https://www.backblaze.com/b2/cloud-storage.html" config="/b2/" >}}
 * {{< provider name="Box" home="https://www.box.com/" config="/box/" >}}
 * {{< provider name="Ceph" home="http://ceph.com/" config="/s3/#ceph" >}}
+* {{< provider name="Citrix ShareFile" home="http://sharefile.com/" config="/sharefile/" >}}
 * {{< provider name="C14" home="https://www.online.net/en/storage/c14-cold-storage" config="/sftp/#c14" >}}
 * {{< provider name="DigitalOcean Spaces" home="https://www.digitalocean.com/products/object-storage/" config="/s3/#digitalocean-spaces" >}}
 * {{< provider name="Dreamhost" home="https://www.dreamhost.com/cloud/storage/" config="/s3/#dreamhost" >}}
diff --git a/docs/content/docs.md b/docs/content/docs.md
index 61ea727cc..bfd6178fa 100644
--- a/docs/content/docs.md
+++ b/docs/content/docs.md
@@ -27,6 +27,7 @@ See the following for detailed instructions for
   * [Box](/box/)
   * [Cache](/cache/)
   * [Chunker](/chunker/) - transparently splits large files for other remotes
+  * [Citrix ShareFile](/sharefile/)
   * [Crypt](/crypt/) - to encrypt other remotes
   * [DigitalOcean Spaces](/s3/#digitalocean-spaces)
   * [Dropbox](/dropbox/)
diff --git a/docs/content/overview.md b/docs/content/overview.md
index 7f7fe5e89..7557ab1ba 100644
--- a/docs/content/overview.md
+++ b/docs/content/overview.md
@@ -22,6 +22,7 @@ Here is an overview of the major features of each cloud storage system.
 | Amazon S3 | MD5 | Yes | No | No | R/W |
 | Backblaze B2 | SHA1 | Yes | No | No | R/W |
 | Box | SHA1 | Yes | Yes | No | - |
+| Citrix ShareFile | MD5 | Yes | Yes | No | - |
 | Dropbox | DBHASH † | Yes | Yes | No | - |
 | FTP | - | No | No | No | - |
 | Google Cloud Storage | MD5 | Yes | No | No | R/W |
@@ -143,6 +144,7 @@ operations more efficient.
 | Amazon S3 | No | Yes | No | No | No | Yes | Yes | No [#2178](https://github.com/rclone/rclone/issues/2178) | No | No |
 | Backblaze B2 | No | Yes | No | No | Yes | Yes | Yes | Yes | No | No |
 | Box | Yes | Yes | Yes | Yes | No [#575](https://github.com/rclone/rclone/issues/575) | No | Yes | Yes | No | Yes |
+| Citrix ShareFile | Yes | Yes | Yes | Yes | No | No | Yes | No | No | Yes |
 | Dropbox | Yes | Yes | Yes | Yes | No [#575](https://github.com/rclone/rclone/issues/575) | No | Yes | Yes | Yes | Yes |
 | FTP | No | No | Yes | Yes | No | No | Yes | No [#2178](https://github.com/rclone/rclone/issues/2178) | No | Yes |
 | Google Cloud Storage | Yes | Yes | No | No | No | Yes | Yes | No [#2178](https://github.com/rclone/rclone/issues/2178) | No | No |
diff --git a/docs/content/sharefile.md b/docs/content/sharefile.md
new file mode 100644
index 000000000..70de59515
--- /dev/null
+++ b/docs/content/sharefile.md
@@ -0,0 +1,224 @@
+---
+title: "Citrix ShareFile"
+description: "Rclone docs for Citrix ShareFile"
+date: "2019-09-30"
+---
+
+## Citrix ShareFile
+
+[Citrix ShareFile](https://sharefile.com) is a secure file sharing and
+transfer service aimed at businesses.
+
+The initial setup for Citrix ShareFile involves getting a token from
+Citrix ShareFile which you can do in your browser. `rclone config`
+walks you through it.
+
+Here is an example of how to make a remote called `remote`.
First run:
+
+    rclone config
+
+This will guide you through an interactive setup process:
+
+```
+No remotes found - make a new one
+n) New remote
+s) Set configuration password
+q) Quit config
+n/s/q> n
+name> remote
+Type of storage to configure.
+Enter a string value. Press Enter for the default ("").
+Choose a number from below, or type in your own value
+XX / Citrix Sharefile
+   \ "sharefile"
+Storage> sharefile
+** See help for sharefile backend at: https://rclone.org/sharefile/ **
+
+ID of the root folder
+
+Leave blank to access "Personal Folders". You can use one of the
+standard values here or any folder ID (long hex number ID).
+Enter a string value. Press Enter for the default ("").
+Choose a number from below, or type in your own value
+ 1 / Access the Personal Folders. (Default)
+   \ ""
+ 2 / Access the Favorites folder.
+   \ "favorites"
+ 3 / Access all the shared folders.
+   \ "allshared"
+ 4 / Access all the individual connectors.
+   \ "connectors"
+ 5 / Access the home, favorites, and shared folders as well as the connectors.
+   \ "top"
+root_folder_id>
+Edit advanced config? (y/n)
+y) Yes
+n) No
+y/n> n
+Remote config
+Use auto config?
+ * Say Y if not sure
+ * Say N if you are working on a remote or headless machine
+y) Yes
+n) No
+y/n> y
+If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth?state=XXX
+Log in and authorize rclone for access
+Waiting for code...
+Got code
+--------------------
+[remote]
+type = sharefile
+endpoint = https://XXX.sharefile.com
+token = {"access_token":"XXX","token_type":"bearer","refresh_token":"XXX","expiry":"2019-09-30T19:41:45.878561877+01:00"}
+--------------------
+y) Yes this is OK
+e) Edit this remote
+d) Delete this remote
+y/e/d> y
+```
+
+See the [remote setup docs](/remote_setup/) for how to set it up on a
+machine with no Internet browser available.
+
+Note that rclone runs a webserver on your local machine to collect the
+token as returned from Citrix ShareFile. This only runs from the moment
+it opens your browser to the moment you get back the verification code.
+This is on `http://127.0.0.1:53682/` and it may require you to unblock
+it temporarily if you are running a host firewall.
+
+Once configured you can then use `rclone` like this,
+
+List directories in top level of your ShareFile
+
+    rclone lsd remote:
+
+List all the files in your ShareFile
+
+    rclone ls remote:
+
+To copy a local directory to a ShareFile directory called backup
+
+    rclone copy /home/source remote:backup
+
+Paths may be as deep as required, eg `remote:directory/subdirectory`.
+
+### Modified time and hashes ###
+
+ShareFile allows modification times to be set on objects accurate to 1
+second. These will be used to detect whether objects need syncing or
+not.
+
+ShareFile supports MD5 type hashes, so you can use the `--checksum`
+flag.
+
+### Transfers ###
+
+For files above 128MB rclone will use a chunked transfer. Rclone will
+upload up to `--transfers` chunks at the same time (shared among all
+the multipart uploads). Chunks are buffered in memory and are
+normally 64MB, so increasing `--transfers` will increase memory use.
+A worked example is shown below.
+
+### Limitations ###
+
+Note that ShareFile is case insensitive so you can't have a file called
+"Hello.doc" and one called "hello.doc".
+
+ShareFile only supports filenames up to 256 characters in length.
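+
+As a worked example pulling the sections above together (the source
+path, flag values and destination are illustrative only, not
+recommendations), a checksum-based sync using more simultaneous
+transfers and larger chunks might look like this:
+
+    rclone sync --checksum --transfers 8 --sharefile-chunk-size 128M /home/source remote:backup
+
+`--sharefile-chunk-size` must be a power of 2 and is documented with
+the other backend specific options below.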
+
+#### Restricted filename characters
+
+In addition to the [default restricted characters set](/overview/#restricted-characters)
+the following characters are also replaced:
+
+| Character | Value | Replacement |
+| --------- |:-----:|:-----------:|
+| \\        | 0x5C  | ＼          |
+| *         | 0x2A  | ＊          |
+| <         | 0x3C  | ＜          |
+| >         | 0x3E  | ＞          |
+| ?         | 0x3F  | ？          |
+| :         | 0x3A  | ：          |
+| \|        | 0x7C  | ｜          |
+| "         | 0x22  | ＂          |
+
+File names can also not start or end with the following characters.
+These only get replaced if they are the first or last character in the
+name:
+
+| Character | Value | Replacement |
+| --------- |:-----:|:-----------:|
+| SP        | 0x20  | ␠           |
+| .         | 0x2E  | ．          |
+
+Invalid UTF-8 bytes will also be [replaced](/overview/#invalid-utf8),
+as they can't be used in JSON strings.
+
+
+### Standard Options
+
+Here are the standard options specific to sharefile (Citrix Sharefile).
+
+#### --sharefile-root-folder-id
+
+ID of the root folder
+
+Leave blank to access "Personal Folders". You can use one of the
+standard values here or any folder ID (long hex number ID).
+
+- Config: root_folder_id
+- Env Var: RCLONE_SHAREFILE_ROOT_FOLDER_ID
+- Type: string
+- Default: ""
+- Examples:
+    - ""
+        - Access the Personal Folders. (Default)
+    - "favorites"
+        - Access the Favorites folder.
+    - "allshared"
+        - Access all the shared folders.
+    - "connectors"
+        - Access all the individual connectors.
+    - "top"
+        - Access the home, favorites, and shared folders as well as the connectors.
+
+### Advanced Options
+
+Here are the advanced options specific to sharefile (Citrix Sharefile).
+
+#### --sharefile-upload-cutoff
+
+Cutoff for switching to multipart upload.
+
+- Config: upload_cutoff
+- Env Var: RCLONE_SHAREFILE_UPLOAD_CUTOFF
+- Type: SizeSuffix
+- Default: 128M
+
+#### --sharefile-chunk-size
+
+Upload chunk size. Must be a power of 2 >= 256k.
+
+Making this larger will improve performance, but note that each chunk
+is buffered in memory, one per transfer.
+
+Reducing this will reduce memory usage but decrease performance.
+
+- Config: chunk_size
+- Env Var: RCLONE_SHAREFILE_CHUNK_SIZE
+- Type: SizeSuffix
+- Default: 64M
+
+#### --sharefile-endpoint
+
+Endpoint for API calls.
+
+This is usually auto-discovered as part of the oauth process, but can
+be set manually to something like: https://XXX.sharefile.com
+
+
+- Config: endpoint
+- Env Var: RCLONE_SHAREFILE_ENDPOINT
+- Type: string
+- Default: ""
+
+
diff --git a/docs/layouts/chrome/navbar.html b/docs/layouts/chrome/navbar.html
index 4f5e4bd99..4000bfe8d 100644
--- a/docs/layouts/chrome/navbar.html
+++ b/docs/layouts/chrome/navbar.html
@@ -64,6 +64,7 @@
  • Box
  • Cache
  • Chunker (splits large files)
  • + Citrix ShareFile
  • Crypt (encrypts the others)
  • Dropbox
  • FTP
diff --git a/fstest/test_all/config.yaml b/fstest/test_all/config.yaml
index 31201bf36..7b136e0c3 100644
--- a/fstest/test_all/config.yaml
+++ b/fstest/test_all/config.yaml
@@ -187,7 +187,9 @@ backends:
     fastlist: false
   - backend: "putio"
     remote: "TestPutio:"
-    subdir: false
+    fastlist: false
+  - backend: "sharefile"
+    remote: "TestSharefile:"
     fastlist: false
   - backend: "mailru"
     remote: "TestMailru:"
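
The `config.yaml` entry above registers the new backend with rclone's
integration test runner. As a quick local check (this assumes you have
a working remote configured under the name `TestSharefile:`, matching
the entry above), the backend's tests can be run on their own in the
usual rclone fashion:

```
# Run the ShareFile backend tests against the configured test remote
cd backend/sharefile
go test -v -remote TestSharefile:
```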