From c754e89906543cfb4f155201f7f8030f62cf122e Mon Sep 17 00:00:00 2001 From: Fred Date: Sat, 25 Apr 2020 18:55:18 +0100 Subject: [PATCH] seafile: New backend for seafile server --- README.md | 1 + backend/all/all.go | 1 + backend/seafile/api/types.go | 153 ++ backend/seafile/object.go | 127 ++ backend/seafile/pacer.go | 67 + backend/seafile/seafile.go | 1247 +++++++++++++++++ backend/seafile/seafile_internal_test.go | 123 ++ backend/seafile/seafile_test.go | 17 + backend/seafile/webapi.go | 1083 ++++++++++++++ bin/make_manual.py | 1 + docs/content/about.md | 1 + docs/content/docs.md | 1 + docs/content/overview.md | 2 + docs/content/seafile.md | 251 ++++ docs/layouts/chrome/navbar.html | 1 + fstest/test_all/config.yaml | 11 + fstest/testserver/init.d/TestSeafile | 60 + fstest/testserver/init.d/TestSeafileEncrypted | 65 + fstest/testserver/init.d/TestSeafileV6 | 48 + .../init.d/seafile/docker-compose.yml | 31 + 20 files changed, 3291 insertions(+) create mode 100644 backend/seafile/api/types.go create mode 100644 backend/seafile/object.go create mode 100644 backend/seafile/pacer.go create mode 100644 backend/seafile/seafile.go create mode 100644 backend/seafile/seafile_internal_test.go create mode 100644 backend/seafile/seafile_test.go create mode 100644 backend/seafile/webapi.go create mode 100644 docs/content/seafile.md create mode 100755 fstest/testserver/init.d/TestSeafile create mode 100755 fstest/testserver/init.d/TestSeafileEncrypted create mode 100755 fstest/testserver/init.d/TestSeafileV6 create mode 100644 fstest/testserver/init.d/seafile/docker-compose.yml diff --git a/README.md b/README.md index 6aff6b0e6..ff4ad14b8 100644 --- a/README.md +++ b/README.md @@ -59,6 +59,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and * QingStor [:page_facing_up:](https://rclone.org/qingstor/) * Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/) * Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway) + * Seafile [:page_facing_up:](https://rclone.org/seafile/) * SFTP [:page_facing_up:](https://rclone.org/sftp/) * StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath) * SugarSync [:page_facing_up:](https://rclone.org/sugarsync/) diff --git a/backend/all/all.go b/backend/all/all.go index 3b693a1cd..ec5890747 100644 --- a/backend/all/all.go +++ b/backend/all/all.go @@ -31,6 +31,7 @@ import ( _ "github.com/rclone/rclone/backend/putio" _ "github.com/rclone/rclone/backend/qingstor" _ "github.com/rclone/rclone/backend/s3" + _ "github.com/rclone/rclone/backend/seafile" _ "github.com/rclone/rclone/backend/sftp" _ "github.com/rclone/rclone/backend/sharefile" _ "github.com/rclone/rclone/backend/sugarsync" diff --git a/backend/seafile/api/types.go b/backend/seafile/api/types.go new file mode 100644 index 000000000..4df24767c --- /dev/null +++ b/backend/seafile/api/types.go @@ -0,0 +1,153 @@ +package api + +// Some api objects are duplicated with only small differences, +// it's because the returned JSON objects are very inconsistent between api calls + +// AuthenticationRequest contains user credentials +type AuthenticationRequest struct { + Username string `json:"username"` + Password string `json:"password"` +} + +// AuthenticationResult is returned by a call to the authentication api +type AuthenticationResult struct { + Token string `json:"token"` + Errors []string `json:"non_field_errors"` +} + +// AccountInfo contains simple user properties +type AccountInfo struct { + Usage int64 `json:"usage"` + Total int64 `json:"total"` + Email string 
`json:"email"` + Name string `json:"name"` +} + +// ServerInfo contains server information +type ServerInfo struct { + Version string `json:"version"` +} + +// DefaultLibrary when none specified +type DefaultLibrary struct { + ID string `json:"repo_id"` + Exists bool `json:"exists"` +} + +// CreateLibraryRequest contains the information needed to create a library +type CreateLibraryRequest struct { + Name string `json:"name"` + Description string `json:"desc"` + Password string `json:"passwd"` +} + +// Library properties. Please note not all properties are going to be useful for rclone +type Library struct { + Encrypted bool `json:"encrypted"` + Owner string `json:"owner"` + ID string `json:"id"` + Size int `json:"size"` + Name string `json:"name"` + Modified int64 `json:"mtime"` +} + +// CreateLibrary properties. Seafile is not consistent and returns different types for different API calls +type CreateLibrary struct { + ID string `json:"repo_id"` + Name string `json:"repo_name"` +} + +// FileType is either "dir" or "file" +type FileType string + +// File types +var ( + FileTypeDir FileType = "dir" + FileTypeFile FileType = "file" +) + +// FileDetail contains file properties (for older api v2.0) +type FileDetail struct { + ID string `json:"id"` + Type FileType `json:"type"` + Name string `json:"name"` + Size int64 `json:"size"` + Parent string `json:"parent_dir"` + Modified string `json:"last_modified"` +} + +// DirEntries contains a list of DirEntry +type DirEntries struct { + Entries []DirEntry `json:"dirent_list"` +} + +// DirEntry contains a directory entry +type DirEntry struct { + ID string `json:"id"` + Type FileType `json:"type"` + Name string `json:"name"` + Size int64 `json:"size"` + Path string `json:"parent_dir"` + Modified int64 `json:"mtime"` +} + +// Operation is move, copy or rename +type Operation string + +// Operations +var ( + CopyFileOperation Operation = "copy" + MoveFileOperation Operation = "move" + RenameFileOperation Operation = "rename" +) + +// FileOperationRequest is sent to the api to copy, move or rename a file +type FileOperationRequest struct { + Operation Operation `json:"operation"` + DestinationLibraryID string `json:"dst_repo"` // For copy/move operation + DestinationPath string `json:"dst_dir"` // For copy/move operation + NewName string `json:"newname"` // Only to be used by the rename operation +} + +// FileInfo is returned by a server file copy/move/rename (new api v2.1) +type FileInfo struct { + Type string `json:"type"` + LibraryID string `json:"repo_id"` + Path string `json:"parent_dir"` + Name string `json:"obj_name"` + ID string `json:"obj_id"` + Size int64 `json:"size"` +} + +// CreateDirRequest only contain an operation field +type CreateDirRequest struct { + Operation string `json:"operation"` +} + +// DirectoryDetail contains the directory details specific to the getDirectoryDetails call +type DirectoryDetail struct { + ID string `json:"repo_id"` + Name string `json:"name"` + Path string `json:"path"` +} + +// ShareLinkRequest contains the information needed to create or list shared links +type ShareLinkRequest struct { + LibraryID string `json:"repo_id"` + Path string `json:"path"` +} + +// SharedLink contains the information returned by a call to shared link creation +type SharedLink struct { + Link string `json:"link"` + IsExpired bool `json:"is_expired"` +} + +// BatchSourceDestRequest contains JSON parameters for sending a batch copy or move operation +type BatchSourceDestRequest struct { + SrcLibraryID string `json:"src_repo_id"` + 
SrcParentDir string `json:"src_parent_dir"` + SrcItems []string `json:"src_dirents"` + DstLibraryID string `json:"dst_repo_id"` + DstParentDir string `json:"dst_parent_dir"` +} diff --git a/backend/seafile/object.go b/backend/seafile/object.go new file mode 100644 index 000000000..f214331f5 --- /dev/null +++ b/backend/seafile/object.go @@ -0,0 +1,127 @@ +package seafile + +import ( + "context" + "io" + "time" + + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/hash" +) + +// Object describes a seafile object (also commonly called a file) +type Object struct { + fs *Fs // what this object is part of + id string // internal ID of object + remote string // The remote path (full path containing library name if target at root) + pathInLibrary string // Path of the object without the library name + size int64 // size of the object + modTime time.Time // modification time of the object + libraryID string // Needed to download the file +} + +// ==================== Interface fs.DirEntry ==================== + +// Return a string version +func (o *Object) String() string { + if o == nil { + return "" + } + return o.remote +} + +// Remote returns the remote string +func (o *Object) Remote() string { + return o.remote +} + +// ModTime returns last modified time +func (o *Object) ModTime(context.Context) time.Time { + return o.modTime +} + +// Size returns the size of an object in bytes +func (o *Object) Size() int64 { + return o.size +} + +// ==================== Interface fs.ObjectInfo ==================== + +// Fs returns the parent Fs +func (o *Object) Fs() fs.Info { + return o.fs +} + +// Hash returns the selected checksum of the file +// If no checksum is available it returns "" +func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) { + return "", hash.ErrUnsupported +} + +// Storable says whether this object can be stored +func (o *Object) Storable() bool { + return true +} + +// ==================== Interface fs.Object ==================== + +// SetModTime sets the metadata on the object to set the modification date +func (o *Object) SetModTime(ctx context.Context, t time.Time) error { + return fs.ErrorCantSetModTime +} + +// Open opens the file for read. Call Close() on the returned io.ReadCloser +func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) { + downloadLink, err := o.fs.getDownloadLink(ctx, o.libraryID, o.pathInLibrary) + if err != nil { + return nil, err + } + reader, err := o.fs.download(ctx, downloadLink, o.Size(), options...) + if err != nil { + return nil, err + } + return reader, nil +} + +// Update in to the object with the modTime given of the given size +// +// When called from outside a Fs by rclone, src.Size() will always be >= 0. +// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either +// return an error or update the object properly (rather than e.g. calling panic). 
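+//
+// Seafile upload links are single use, so each of the retries below requests a fresh link instead of retrying through the pacer.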
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { + // The upload sometimes return a temporary 500 error + // We cannot use the pacer to retry uploading the file as the upload link is single use only + for retry := 0; retry <= 3; retry++ { + uploadLink, err := o.fs.getUploadLink(ctx, o.libraryID) + if err != nil { + return err + } + + uploaded, err := o.fs.upload(ctx, in, uploadLink, o.pathInLibrary) + if err == ErrorInternalDuringUpload { + // This is a temporary error, try again with a new upload link + continue + } + if err != nil { + return err + } + // Set the properties from the upload back to the object + o.size = uploaded.Size + o.id = uploaded.ID + + return nil + } + return ErrorInternalDuringUpload +} + +// Remove this object +func (o *Object) Remove(ctx context.Context) error { + return o.fs.deleteFile(ctx, o.libraryID, o.pathInLibrary) +} + +// ==================== Optional Interface fs.IDer ==================== + +// ID returns the ID of the Object if known, or "" if not +func (o *Object) ID() string { + return o.id +} diff --git a/backend/seafile/pacer.go b/backend/seafile/pacer.go new file mode 100644 index 000000000..3c99a5b85 --- /dev/null +++ b/backend/seafile/pacer.go @@ -0,0 +1,67 @@ +package seafile + +import ( + "fmt" + "net/url" + "sync" + "time" + + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/lib/pacer" +) + +const ( + minSleep = 100 * time.Millisecond + maxSleep = 10 * time.Second + decayConstant = 2 // bigger for slower decay, exponential +) + +// Use only one pacer per server URL +var ( + pacers map[string]*fs.Pacer + pacerMutex sync.Mutex +) + +func init() { + pacers = make(map[string]*fs.Pacer, 0) +} + +// getPacer returns the unique pacer for that remote URL +func getPacer(remote string) *fs.Pacer { + pacerMutex.Lock() + defer pacerMutex.Unlock() + + remote = parseRemote(remote) + if existing, found := pacers[remote]; found { + return existing + } + + pacers[remote] = fs.NewPacer( + pacer.NewDefault( + pacer.MinSleep(minSleep), + pacer.MaxSleep(maxSleep), + pacer.DecayConstant(decayConstant), + ), + ) + return pacers[remote] +} + +// parseRemote formats a remote url into "hostname:port" +func parseRemote(remote string) string { + remoteURL, err := url.Parse(remote) + if err != nil { + // Return a default value in the very unlikely event we're not going to parse remote + fs.Infof(nil, "Cannot parse remote %s", remote) + return "default" + } + host := remoteURL.Hostname() + port := remoteURL.Port() + if port == "" { + if remoteURL.Scheme == "https" { + port = "443" + } else { + port = "80" + } + } + return fmt.Sprintf("%s:%s", host, port) +} diff --git a/backend/seafile/seafile.go b/backend/seafile/seafile.go new file mode 100644 index 000000000..4c152a21b --- /dev/null +++ b/backend/seafile/seafile.go @@ -0,0 +1,1247 @@ +package seafile + +import ( + "context" + "fmt" + "io" + "net/http" + "net/url" + "path" + "strconv" + "strings" + "sync" + "time" + + "github.com/coreos/go-semver/semver" + "github.com/pkg/errors" + "github.com/rclone/rclone/backend/seafile/api" + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/config" + "github.com/rclone/rclone/fs/config/configmap" + "github.com/rclone/rclone/fs/config/configstruct" + "github.com/rclone/rclone/fs/config/obscure" + "github.com/rclone/rclone/fs/fserrors" + "github.com/rclone/rclone/fs/fshttp" + "github.com/rclone/rclone/fs/hash" + "github.com/rclone/rclone/lib/bucket" + "github.com/rclone/rclone/lib/cache" + 
"github.com/rclone/rclone/lib/encoder" + "github.com/rclone/rclone/lib/pacer" + "github.com/rclone/rclone/lib/random" + "github.com/rclone/rclone/lib/rest" +) + +const ( + librariesCacheKey = "all" + retryAfterHeader = "Retry-After" +) + +// This is global to all instances of fs +// (copying from a seafile remote to another remote would create 2 fs) +var ( + rangeDownloadNotice sync.Once // Display the notice only once + createLibraryMutex sync.Mutex // Mutex to protect library creation +) + +// Register with Fs +func init() { + fs.Register(&fs.RegInfo{ + Name: "seafile", + Description: "seafile", + NewFs: NewFs, + Options: []fs.Option{{ + Name: "url", + Help: "URL of seafile host to connect to", + Required: true, + Examples: []fs.OptionExample{{ + Value: "https://cloud.seafile.com/", + Help: "Connect to cloud.seafile.com", + }}, + }, { + Name: "user", + Help: "User name", + Required: true, + }, { + Name: "pass", + Help: "Password", + IsPassword: true, + Required: true, + }, { + Name: "library", + Help: "Name of the library. Leave blank to access all non-encrypted libraries.", + }, { + Name: "library_key", + Help: "Library password (for encrypted libraries only). Leave blank if you pass it through the command line.", + IsPassword: true, + }, { + Name: "create_library", + Help: "Should create library if it doesn't exist", + Advanced: true, + Default: false, + }, { + Name: config.ConfigEncoding, + Help: config.ConfigEncodingHelp, + Advanced: true, + Default: (encoder.EncodeZero | + encoder.EncodeCtl | + encoder.EncodeSlash | + encoder.EncodeBackSlash | + encoder.EncodeDoubleQuote | + encoder.EncodeInvalidUtf8), + }}, + }) +} + +// Options defines the configuration for this backend +type Options struct { + URL string `config:"url"` + User string `config:"user"` + Password string `config:"pass"` + LibraryName string `config:"library"` + LibraryKey string `config:"library_key"` + CreateLibrary bool `config:"create_library"` + Enc encoder.MultiEncoder `config:"encoding"` +} + +// Fs represents a remote seafile +type Fs struct { + name string // name of this remote + root string // the path we are working on + libraryName string // current library + encrypted bool // Is this an encrypted library + rootDirectory string // directory part of root (if any) + opt Options // parsed options + libraries *cache.Cache // Keep a cache of libraries + librariesMutex sync.Mutex // Mutex to protect getLibraryID + features *fs.Features // optional features + endpoint *url.URL // URL of the host + endpointURL string // endpoint as a string + srv *rest.Client // the connection to the one drive server + pacer *fs.Pacer // pacer for API calls + authMu sync.Mutex // Mutex to protect library decryption + createDirMutex sync.Mutex // Protect creation of directories + useOldDirectoryAPI bool // Use the old API v2 if seafile < 7 + moveDirNotAvailable bool // Version < 7.0 don't have an API to move a directory +} + +// ------------------------------------------------------------ + +// NewFs constructs an Fs from the path, container:path +func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { + // Parse config into Options struct + opt := new(Options) + err := configstruct.Set(m, opt) + if err != nil { + return nil, err + } + root = strings.Trim(root, "/") + isLibraryRooted := opt.LibraryName != "" + var libraryName, rootDirectory string + if isLibraryRooted { + libraryName = opt.LibraryName + rootDirectory = root + } else { + libraryName, rootDirectory = bucket.Split(root) + } + + if 
!strings.HasSuffix(opt.URL, "/") { + opt.URL += "/" + } + if opt.Password != "" { + var err error + opt.Password, err = obscure.Reveal(opt.Password) + if err != nil { + return nil, errors.Wrap(err, "couldn't decrypt user password") + } + } + if opt.LibraryKey != "" { + var err error + opt.LibraryKey, err = obscure.Reveal(opt.LibraryKey) + if err != nil { + return nil, errors.Wrap(err, "couldn't decrypt library password") + } + } + + // Parse the endpoint + u, err := url.Parse(opt.URL) + if err != nil { + return nil, err + } + + f := &Fs{ + name: name, + root: root, + libraryName: libraryName, + rootDirectory: rootDirectory, + libraries: cache.New(), + opt: *opt, + endpoint: u, + endpointURL: u.String(), + srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetRoot(u.String()), + pacer: getPacer(opt.URL), + } + f.features = (&fs.Features{ + CanHaveEmptyDirectories: true, + BucketBased: opt.LibraryName == "", + }).Fill(f) + + ctx := context.Background() + serverInfo, err := f.getServerInfo(ctx) + if err != nil { + return nil, err + } + fs.Debugf(nil, "Seafile server version %s", serverInfo.Version) + + // We don't support bellow seafile v6.0 (version 6.0 is already more than 3 years old) + serverVersion := semver.New(serverInfo.Version) + if serverVersion.Major < 6 { + return nil, errors.New("unsupported Seafile server (version < 6.0)") + } + if serverVersion.Major < 7 { + // Seafile 6 does not support recursive listing + f.useOldDirectoryAPI = true + f.features.ListR = nil + // It also does no support moving directories + f.moveDirNotAvailable = true + } + + err = f.authorizeAccount(ctx) + if err != nil { + return nil, err + } + + if f.libraryName != "" { + // Check if the library exists + exists, err := f.libraryExists(ctx, f.libraryName) + if err != nil { + return f, err + } + if !exists { + if f.opt.CreateLibrary { + err := f.mkLibrary(ctx, f.libraryName, "") + if err != nil { + return f, err + } + } else { + return f, fmt.Errorf("library '%s' was not found, and the option to create it is not activated (advanced option)", f.libraryName) + } + } + libraryID, err := f.getLibraryID(ctx, f.libraryName) + if err != nil { + return f, err + } + f.encrypted, err = f.isEncrypted(ctx, libraryID) + if err != nil { + return f, err + } + if f.encrypted { + // If we're inside an encrypted library, let's decrypt it now + err = f.authorizeLibrary(ctx, libraryID) + if err != nil { + return f, err + } + // And remove the public link feature + f.features.PublicLink = nil + } + } else { + // Deactivate the cleaner feature since there's no library selected + f.features.CleanUp = nil + } + + if f.rootDirectory != "" { + // Check to see if the root is an existing file + remote := path.Base(rootDirectory) + f.rootDirectory = path.Dir(rootDirectory) + if f.rootDirectory == "." { + f.rootDirectory = "" + } + _, err := f.NewObject(ctx, remote) + if err != nil { + if errors.Cause(err) == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile { + // File doesn't exist so return the original f + f.rootDirectory = rootDirectory + return f, nil + } + return f, err + } + // return an error with an fs which points to the parent + return f, fs.ErrorIsFile + } + return f, nil +} + +// sets the AuthorizationToken up +func (f *Fs) setAuthorizationToken(token string) { + f.srv.SetHeader("Authorization", "Token "+token) +} + +// authorizeAccount gets the auth token. 
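+// and installs it on the REST client as the Authorization header for subsequent API calls.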
+func (f *Fs) authorizeAccount(ctx context.Context) error { + f.authMu.Lock() + defer f.authMu.Unlock() + token, err := f.getAuthorizationToken(ctx) + if err != nil { + return err + } + f.setAuthorizationToken(token) + return nil +} + +// retryErrorCodes is a slice of error codes that we will retry +var retryErrorCodes = []int{ + 401, // Unauthorized (eg "Token has expired") + 408, // Request Timeout + 429, // Rate exceeded. + 500, // Get occasional 500 Internal Server Error + 503, // Service Unavailable + 504, // Gateway Time-out + 520, // Operation failed (We get them sometimes when running tests in parallel) +} + +// shouldRetryNoAuth returns a boolean as to whether this resp and err +// deserve to be retried. It returns the err as a convenience +func (f *Fs) shouldRetryNoReauth(resp *http.Response, err error) (bool, error) { + // For 429 errors look at the Retry-After: header and + // set the retry appropriately, starting with a minimum of 1 + // second if it isn't set. + if resp != nil && (resp.StatusCode == 429) { + var retryAfter = 1 + retryAfterString := resp.Header.Get(retryAfterHeader) + if retryAfterString != "" { + var err error + retryAfter, err = strconv.Atoi(retryAfterString) + if err != nil { + fs.Errorf(f, "Malformed %s header %q: %v", retryAfterHeader, retryAfterString, err) + } + } + return true, pacer.RetryAfterError(err, time.Duration(retryAfter)*time.Second) + } + return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err +} + +// shouldRetry returns a boolean as to whether this resp and err +// deserve to be retried. It returns the err as a convenience +func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) { + // It looks like seafile is using the 403 error code instead of the standard 401. + if resp != nil && (resp.StatusCode == 401 || resp.StatusCode == 403) { + fs.Debugf(f, "Unauthorized: %v", err) + // Reauth + authErr := f.authorizeAccount(ctx) + if authErr != nil { + err = authErr + } + return true, err + } + return f.shouldRetryNoReauth(resp, err) +} + +func (f *Fs) shouldRetryUpload(ctx context.Context, resp *http.Response, err error) (bool, error) { + if err != nil || (resp != nil && resp.StatusCode > 400) { + return true, err + } + return false, nil +} + +// Name of the remote (as passed into NewFs) +func (f *Fs) Name() string { + return f.name +} + +// Root of the remote (as passed into NewFs) +func (f *Fs) Root() string { + return f.root +} + +// String converts this Fs to a string +func (f *Fs) String() string { + if f.libraryName == "" { + return fmt.Sprintf("seafile root") + } + library := "library" + if f.encrypted { + library = "encrypted " + library + } + if f.rootDirectory == "" { + return fmt.Sprintf("seafile %s '%s'", library, f.libraryName) + } + return fmt.Sprintf("seafile %s '%s' path '%s'", library, f.libraryName, f.rootDirectory) +} + +// Precision of the ModTimes in this Fs +func (f *Fs) Precision() time.Duration { + // The API doesn't support setting the modified time + return fs.ModTimeNotSupported +} + +// Hashes returns the supported hash sets. +func (f *Fs) Hashes() hash.Set { + return hash.Set(hash.None) +} + +// Features returns the optional features of this Fs +func (f *Fs) Features() *fs.Features { + return f.features +} + +// List the objects and directories in dir into entries. The +// entries can be returned in any order but should be for a +// complete directory. +// +// dir should be "" to list the root, and should not have +// trailing slashes. 
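+//
+// When no library is configured on the remote, listing the root returns the libraries themselves as directories.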
+// +// This should return fs.ErrorDirNotFound if the directory isn't +// found. +func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { + if dir == "" && f.libraryName == "" { + return f.listLibraries(ctx) + } + return f.listDir(ctx, dir, false) +} + +// NewObject finds the Object at remote. If it can't be found +// it returns the error fs.ErrorObjectNotFound. +func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { + libraryName, filePath := f.splitPath(remote) + libraryID, err := f.getLibraryID(ctx, libraryName) + if err != nil { + return nil, err + } + err = f.authorizeLibrary(ctx, libraryID) + if err != nil { + return nil, err + } + + fileDetails, err := f.getFileDetails(ctx, libraryID, filePath) + if err != nil { + return nil, err + } + + modTime, err := time.Parse(time.RFC3339, fileDetails.Modified) + if err != nil { + fs.LogPrintf(fs.LogLevelWarning, fileDetails.Modified, "Cannot parse datetime") + } + + o := &Object{ + fs: f, + libraryID: libraryID, + id: fileDetails.ID, + remote: remote, + pathInLibrary: filePath, + modTime: modTime, + size: fileDetails.Size, + } + return o, nil +} + +// Put in to the remote path with the modTime given of the given size +// +// When called from outside a Fs by rclone, src.Size() will always be >= 0. +// But for unknown-sized objects (indicated by src.Size() == -1), Put should either +// return an error or upload it properly (rather than e.g. calling panic). +// +// May create the object even if it returns an error - if so +// will return the object and the error, otherwise will return +// nil and the error +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + object := f.newObject(ctx, src.Remote(), src.Size(), src.ModTime(ctx)) + // Check if we need to create a new library at that point + if object.libraryID == "" { + library, _ := f.splitPath(object.remote) + err := f.Mkdir(ctx, library) + if err != nil { + return object, err + } + libraryID, err := f.getLibraryID(ctx, library) + if err != nil { + return object, err + } + object.libraryID = libraryID + } + err := object.Update(ctx, in, src, options...) + if err != nil { + return object, err + } + return object, nil +} + +// PutStream uploads to the remote path with the modTime given but of indeterminate size +func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + return f.Put(ctx, in, src, options...) 
+} + +// Mkdir makes the directory or library +// +// Shouldn't return an error if it already exists +func (f *Fs) Mkdir(ctx context.Context, dir string) error { + libraryName, folder := f.splitPath(dir) + if strings.HasPrefix(dir, libraryName) { + err := f.mkLibrary(ctx, libraryName, "") + if err != nil { + return err + } + if folder == "" { + // No directory to create after the library + return nil + } + } + err := f.mkDir(ctx, dir) + if err != nil { + return err + } + return nil +} + +// Rmdir removes the directory or library if empty +// +// Return an error if it doesn't exist or isn't empty +func (f *Fs) Rmdir(ctx context.Context, dir string) error { + libraryName, dirPath := f.splitPath(dir) + libraryID, err := f.getLibraryID(ctx, libraryName) + if err != nil { + return err + } + + directoryEntries, err := f.getDirectoryEntries(ctx, libraryID, dirPath, false) + if err != nil { + return err + } + if len(directoryEntries) > 0 { + return fs.ErrorDirectoryNotEmpty + } + if dirPath == "" || dirPath == "/" { + return f.deleteLibrary(ctx, libraryID) + } + return f.deleteDir(ctx, libraryID, dirPath) +} + +// ==================== Optional Interface fs.ListRer ==================== + +// ListR lists the objects and directories of the Fs starting +// from dir recursively into out. +// +// dir should be "" to start from the root, and should not +// have trailing slashes. +// +// This should return ErrDirNotFound if the directory isn't +// found. +// +// It should call callback for each tranche of entries read. +// These need not be returned in any particular order. If +// callback returns an error then the listing will stop +// immediately. +func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) error { + var err error + + if dir == "" && f.libraryName == "" { + libraries, err := f.listLibraries(ctx) + if err != nil { + return err + } + // Send the library list as folders + err = callback(libraries) + if err != nil { + return err + } + + // Then list each library + for _, library := range libraries { + err = f.listDirCallback(ctx, library.Remote(), callback) + if err != nil { + return err + } + } + return nil + } + err = f.listDirCallback(ctx, dir, callback) + if err != nil { + return err + } + return nil +} + +// ==================== Optional Interface fs.Copier ==================== + +// Copy src to this remote using server side copy operations. +// +// This is stored with the remote path given +// +// It returns the destination Object and a possible error +// +// If it isn't possible then return fs.ErrorCantCopy +func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { + srcObj, ok := src.(*Object) + if !ok { + return nil, fs.ErrorCantCopy + } + srcLibraryName, srcPath := srcObj.fs.splitPath(src.Remote()) + srcLibraryID, err := srcObj.fs.getLibraryID(ctx, srcLibraryName) + if err != nil { + return nil, err + } + dstLibraryName, dstPath := f.splitPath(remote) + dstLibraryID, err := f.getLibraryID(ctx, dstLibraryName) + if err != nil { + return nil, err + } + + // Seafile does not accept a file name as a destination, only a path. 
+ // The destination filename will be the same as the original, or with (1) added in case it was already existing + dstDir, dstFilename := path.Split(dstPath) + + // We have to make sure the destination path exists on the server or it's going to bomb out with an obscure error message + err = f.mkMultiDir(ctx, dstLibraryID, dstDir) + if err != nil { + return nil, err + } + + op, err := f.copyFile(ctx, srcLibraryID, srcPath, dstLibraryID, dstDir) + if err != nil { + return nil, err + } + + if op.Name != dstFilename { + // Destination was existing, so we need to move the file back into place + err = f.adjustDestination(ctx, dstLibraryID, op.Name, dstPath, dstDir, dstFilename) + if err != nil { + return nil, err + } + } + // Create a new object from the result + return f.NewObject(ctx, remote) +} + +// ==================== Optional Interface fs.Mover ==================== + +// Move src to this remote using server side move operations. +// +// This is stored with the remote path given +// +// It returns the destination Object and a possible error +// +// If it isn't possible then return fs.ErrorCantMove +func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { + srcObj, ok := src.(*Object) + if !ok { + return nil, fs.ErrorCantMove + } + + srcLibraryName, srcPath := srcObj.fs.splitPath(src.Remote()) + srcLibraryID, err := srcObj.fs.getLibraryID(ctx, srcLibraryName) + if err != nil { + return nil, err + } + dstLibraryName, dstPath := f.splitPath(remote) + dstLibraryID, err := f.getLibraryID(ctx, dstLibraryName) + if err != nil { + return nil, err + } + + // anchor both source and destination paths from the root so we can compare them + srcPath = path.Join("/", srcPath) + dstPath = path.Join("/", dstPath) + + srcDir := path.Dir(srcPath) + dstDir, dstFilename := path.Split(dstPath) + + if srcLibraryID == dstLibraryID && srcDir == dstDir { + // It's only a simple case of renaming the file + _, err := f.renameFile(ctx, srcLibraryID, srcPath, dstFilename) + if err != nil { + return nil, err + } + return f.NewObject(ctx, remote) + } + + // We have to make sure the destination path exists on the server + err = f.mkMultiDir(ctx, dstLibraryID, dstDir) + if err != nil { + return nil, err + } + + // Seafile does not accept a file name as a destination, only a path. + // The destination filename will be the same as the original, or with (1) added in case it already exists + op, err := f.moveFile(ctx, srcLibraryID, srcPath, dstLibraryID, dstDir) + if err != nil { + return nil, err + } + + if op.Name != dstFilename { + // Destination was existing, so we need to move the file back into place + err = f.adjustDestination(ctx, dstLibraryID, op.Name, dstPath, dstDir, dstFilename) + if err != nil { + return nil, err + } + } + + // Create a new object from the result + return f.NewObject(ctx, remote) +} + +// adjustDestination rename the file +func (f *Fs) adjustDestination(ctx context.Context, libraryID, srcFilename, dstPath, dstDir, dstFilename string) error { + // Seafile seems to be acting strangely if the renamed file already exists (some cache issue maybe?) 
+ // It's better to delete the destination if it already exists + fileDetail, err := f.getFileDetails(ctx, libraryID, dstPath) + if err != nil && err != fs.ErrorObjectNotFound { + return err + } + if fileDetail != nil { + err = f.deleteFile(ctx, libraryID, dstPath) + if err != nil { + return err + } + } + _, err = f.renameFile(ctx, libraryID, path.Join(dstDir, srcFilename), dstFilename) + if err != nil { + return err + } + + return nil +} + +// ==================== Optional Interface fs.DirMover ==================== + +// DirMove moves src, srcRemote to this remote at dstRemote +// using server side move operations. +// +// Will only be called if src.Fs().Name() == f.Name() +// +// If it isn't possible then return fs.ErrorCantDirMove +// +// If destination exists then return fs.ErrorDirExists +func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { + + // Cast into a seafile Fs + srcFs, ok := src.(*Fs) + if !ok { + return fs.ErrorCantDirMove + } + + srcLibraryName, srcPath := srcFs.splitPath(srcRemote) + srcLibraryID, err := srcFs.getLibraryID(ctx, srcLibraryName) + if err != nil { + return err + } + dstLibraryName, dstPath := f.splitPath(dstRemote) + dstLibraryID, err := f.getLibraryID(ctx, dstLibraryName) + if err != nil { + return err + } + + srcDir := path.Dir(srcPath) + dstDir, dstName := path.Split(dstPath) + + // anchor both source and destination to the root so we can compare them + srcDir = path.Join("/", srcDir) + dstDir = path.Join("/", dstDir) + + // The destination should not exist + entries, err := f.getDirectoryEntries(ctx, dstLibraryID, dstDir, false) + if err != nil && err != fs.ErrorDirNotFound { + return err + } + if err == nil { + for _, entry := range entries { + if entry.Name == dstName { + // Destination exists + return fs.ErrorDirExists + } + } + } + if srcLibraryID == dstLibraryID && srcDir == dstDir { + // It's only renaming + err = srcFs.renameDir(ctx, dstLibraryID, srcPath, dstName) + if err != nil { + return err + } + return nil + } + + // Seafile < 7 does not support moving directories + if f.moveDirNotAvailable { + return fs.ErrorCantDirMove + } + + // Make sure the destination path exists + err = f.mkMultiDir(ctx, dstLibraryID, dstDir) + if err != nil { + return err + } + + // If the destination already exists, seafile will add a " (n)" to the name. + // Sadly this API call will not return the new given name like the move file version does + // So the trick is to rename the directory to something random before moving it + // After the move we rename the random name back to the expected one + // Hopefully there won't be anything with the same name existing at destination ;) + tempName := ".rclone-move-" + random.String(32) + + // 1- rename source + err = srcFs.renameDir(ctx, srcLibraryID, srcPath, tempName) + if err != nil { + return errors.Wrap(err, "Cannot rename source directory to a temporary name") + } + + // 2- move source to destination + err = f.moveDir(ctx, srcLibraryID, srcDir, tempName, dstLibraryID, dstDir) + if err != nil { + // Doh! 
Let's rename the source back to its original name + _ = srcFs.renameDir(ctx, srcLibraryID, path.Join(srcDir, tempName), path.Base(srcPath)) + return err + } + + // 3- rename destination back to source name + err = f.renameDir(ctx, dstLibraryID, path.Join(dstDir, tempName), dstName) + if err != nil { + return errors.Wrap(err, "Cannot rename temporary directory to destination name") + } + + return nil +} + +// ==================== Optional Interface fs.Purger ==================== + +// Purge all files in the root and the root directory +// +// Implement this if you have a way of deleting all the files +// quicker than just running Remove() on the result of List() +// +// Return an error if it doesn't exist +func (f *Fs) Purge(ctx context.Context) error { + if f.libraryName == "" { + return errors.New("Cannot delete from the root of the server. Please select a library") + } + libraryID, err := f.getLibraryID(ctx, f.libraryName) + if err != nil { + return err + } + if f.rootDirectory == "" { + // Delete library + err = f.deleteLibrary(ctx, libraryID) + if err != nil { + return err + } + return nil + } + err = f.deleteDir(ctx, libraryID, f.rootDirectory) + if err != nil { + return err + } + return nil +} + +// ==================== Optional Interface fs.CleanUpper ==================== + +// CleanUp the trash in the Fs +func (f *Fs) CleanUp(ctx context.Context) error { + if f.libraryName == "" { + return errors.New("Cannot clean up at the root of the seafile server: please select a library to clean up") + } + libraryID, err := f.getLibraryID(ctx, f.libraryName) + if err != nil { + return err + } + return f.emptyLibraryTrash(ctx, libraryID) +} + +// ==================== Optional Interface fs.Abouter ==================== + +// About gets quota information +func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) { + accountInfo, err := f.getUserAccountInfo(ctx) + if err != nil { + return nil, err + } + + usage = &fs.Usage{ + Used: fs.NewUsageValue(accountInfo.Usage), // bytes in use + } + if accountInfo.Total > 0 { + usage.Total = fs.NewUsageValue(accountInfo.Total) // quota of bytes that can be used + usage.Free = fs.NewUsageValue(accountInfo.Total - accountInfo.Usage) // bytes which can be uploaded before reaching the quota + } + return usage, nil +} + +// ==================== Optional Interface fs.UserInfoer ==================== + +// UserInfo returns info about the connected user +func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) { + accountInfo, err := f.getUserAccountInfo(ctx) + if err != nil { + return nil, err + } + return map[string]string{ + "Name": accountInfo.Name, + "Email": accountInfo.Email, + }, nil +} + +// ==================== Optional Interface fs.PublicLinker ==================== + +// PublicLink generates a public link to the remote path (usually readable by anyone) +func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) { + libraryName, filePath := f.splitPath(remote) + if libraryName == "" { + // We cannot share the whole seafile server, we need at least a library + return "", errors.New("Cannot share the root of the seafile server. 
Please select a library to share") + } + libraryID, err := f.getLibraryID(ctx, libraryName) + if err != nil { + return "", err + } + + // List existing links first + shareLinks, err := f.listShareLinks(ctx, libraryID, filePath) + if err != nil { + return "", err + } + if shareLinks != nil && len(shareLinks) > 0 { + for _, shareLink := range shareLinks { + if shareLink.IsExpired == false { + return shareLink.Link, nil + } + } + } + // No link was found + shareLink, err := f.createShareLink(ctx, libraryID, filePath) + if err != nil { + return "", err + } + if shareLink.IsExpired { + return "", nil + } + return shareLink.Link, nil +} + +func (f *Fs) listLibraries(ctx context.Context) (entries fs.DirEntries, err error) { + libraries, err := f.getCachedLibraries(ctx) + if err != nil { + return nil, errors.New("cannot load libraries") + } + + for _, library := range libraries { + d := fs.NewDir(library.Name, time.Unix(library.Modified, 0)) + d.SetSize(int64(library.Size)) + entries = append(entries, d) + } + + return entries, nil +} + +func (f *Fs) libraryExists(ctx context.Context, libraryName string) (bool, error) { + libraries, err := f.getCachedLibraries(ctx) + if err != nil { + return false, err + } + + for _, library := range libraries { + if library.Name == libraryName { + return true, nil + } + } + return false, nil +} + +func (f *Fs) getLibraryID(ctx context.Context, name string) (string, error) { + libraries, err := f.getCachedLibraries(ctx) + if err != nil { + return "", err + } + + for _, library := range libraries { + if library.Name == name { + return library.ID, nil + } + } + return "", fmt.Errorf("cannot find library '%s'", name) +} + +func (f *Fs) isLibraryInCache(libraryName string) bool { + f.librariesMutex.Lock() + defer f.librariesMutex.Unlock() + + if f.libraries == nil { + return false + } + value, found := f.libraries.GetMaybe(librariesCacheKey) + if found == false { + return false + } + libraries := value.([]api.Library) + for _, library := range libraries { + if library.Name == libraryName { + return true + } + } + return false +} + +func (f *Fs) isEncrypted(ctx context.Context, libraryID string) (bool, error) { + libraries, err := f.getCachedLibraries(ctx) + if err != nil { + return false, err + } + + for _, library := range libraries { + if library.ID == libraryID { + return library.Encrypted, nil + } + } + return false, fmt.Errorf("cannot find library ID %s", libraryID) +} + +func (f *Fs) authorizeLibrary(ctx context.Context, libraryID string) error { + if libraryID == "" { + return errors.New("a library ID is needed") + } + if f.opt.LibraryKey == "" { + // We have no password to send + return nil + } + encrypted, err := f.isEncrypted(ctx, libraryID) + if err != nil { + return err + } + if encrypted { + fs.Debugf(nil, "Decrypting library %s", libraryID) + f.authMu.Lock() + defer f.authMu.Unlock() + err := f.decryptLibrary(ctx, libraryID, f.opt.LibraryKey) + if err != nil { + return err + } + } + return nil +} + +func (f *Fs) mkLibrary(ctx context.Context, libraryName, password string) error { + // lock specific to library creation + // we cannot reuse the same lock as we will dead-lock ourself if the libraries are not in cache + createLibraryMutex.Lock() + defer createLibraryMutex.Unlock() + + if libraryName == "" { + return errors.New("a library name is needed") + } + + // It's quite likely that multiple go routines are going to try creating the same library + // at the start of a sync/copy. 
After releasing the mutex the calls waiting would try to create + // the same library again. So we'd better check the library exists first + if f.isLibraryInCache(libraryName) { + return nil + } + + fs.Debugf(nil, "%s: Create library '%s'", f.Name(), libraryName) + f.librariesMutex.Lock() + defer f.librariesMutex.Unlock() + + library, err := f.createLibrary(ctx, libraryName, password) + if err != nil { + return err + } + // Stores the library details into the cache + value, found := f.libraries.GetMaybe(librariesCacheKey) + if found == false { + // Don't update the cache at that point + return nil + } + libraries := value.([]api.Library) + libraries = append(libraries, api.Library{ + ID: library.ID, + Name: library.Name, + }) + f.libraries.Put(librariesCacheKey, libraries) + return nil +} + +// splitPath returns the library name and the full path inside the library +func (f *Fs) splitPath(dir string) (library, folder string) { + library = f.libraryName + folder = dir + if library == "" { + // The first part of the path is the library + library, folder = bucket.Split(dir) + } else if f.rootDirectory != "" { + // Adds the root folder to the path to get a full path + folder = path.Join(f.rootDirectory, folder) + } + return +} + +func (f *Fs) listDir(ctx context.Context, dir string, recursive bool) (entries fs.DirEntries, err error) { + libraryName, dirPath := f.splitPath(dir) + libraryID, err := f.getLibraryID(ctx, libraryName) + if err != nil { + return nil, err + } + + directoryEntries, err := f.getDirectoryEntries(ctx, libraryID, dirPath, recursive) + if err != nil { + return nil, err + } + + return f.buildDirEntries(dir, libraryID, dirPath, directoryEntries, recursive), nil +} + +// listDirCallback is calling listDir with the recursive option and is sending the result to the callback +func (f *Fs) listDirCallback(ctx context.Context, dir string, callback fs.ListRCallback) error { + entries, err := f.listDir(ctx, dir, true) + if err != nil { + return err + } + err = callback(entries) + if err != nil { + return err + } + return nil +} + +func (f *Fs) buildDirEntries(parentPath, libraryID, parentPathInLibrary string, directoryEntries []api.DirEntry, recursive bool) (entries fs.DirEntries) { + for _, entry := range directoryEntries { + var filePath, filePathInLibrary string + if recursive { + // In recursive mode, paths are built from DirEntry (+ a starting point) + entryPath := strings.TrimPrefix(entry.Path, "/") + // If we're listing from some path inside the library (not the root) + // there's already a path in parameter, which will also be included in the entry path + entryPath = strings.TrimPrefix(entryPath, parentPathInLibrary) + entryPath = strings.TrimPrefix(entryPath, "/") + + filePath = path.Join(parentPath, entryPath, entry.Name) + filePathInLibrary = path.Join(parentPathInLibrary, entryPath, entry.Name) + } else { + // In non-recursive mode, paths are build from the parameters + filePath = path.Join(parentPath, entry.Name) + filePathInLibrary = path.Join(parentPathInLibrary, entry.Name) + } + if entry.Type == api.FileTypeDir { + d := fs. + NewDir(filePath, time.Unix(entry.Modified, 0)). + SetSize(entry.Size). 
+ SetID(entry.ID) + entries = append(entries, d) + } else if entry.Type == api.FileTypeFile { + object := &Object{ + fs: f, + id: entry.ID, + remote: filePath, + pathInLibrary: filePathInLibrary, + size: entry.Size, + modTime: time.Unix(entry.Modified, 0), + libraryID: libraryID, + } + entries = append(entries, object) + } + } + return entries +} + +func (f *Fs) mkDir(ctx context.Context, dir string) error { + library, fullPath := f.splitPath(dir) + libraryID, err := f.getLibraryID(ctx, library) + if err != nil { + return err + } + return f.mkMultiDir(ctx, libraryID, fullPath) +} + +func (f *Fs) mkMultiDir(ctx context.Context, libraryID, dir string) error { + // rebuild the path one by one + currentPath := "" + for _, singleDir := range splitPath(dir) { + currentPath = path.Join(currentPath, singleDir) + err := f.mkSingleDir(ctx, libraryID, currentPath) + if err != nil { + return err + } + } + return nil +} + +func (f *Fs) mkSingleDir(ctx context.Context, libraryID, dir string) error { + f.createDirMutex.Lock() + defer f.createDirMutex.Unlock() + + dirDetails, err := f.getDirectoryDetails(ctx, libraryID, dir) + if err == nil && dirDetails != nil { + // Don't fail if the directory exists + return nil + } + if err == fs.ErrorDirNotFound { + err = f.createDir(ctx, libraryID, dir) + if err != nil { + return err + } + return nil + } + return err +} + +func (f *Fs) getDirectoryEntries(ctx context.Context, libraryID, folder string, recursive bool) ([]api.DirEntry, error) { + if f.useOldDirectoryAPI { + return f.getDirectoryEntriesAPIv2(ctx, libraryID, folder) + } + return f.getDirectoryEntriesAPIv21(ctx, libraryID, folder, recursive) +} + +// splitPath creates a slice of paths +func splitPath(tree string) (paths []string) { + tree, leaf := path.Split(path.Clean(tree)) + for leaf != "" && leaf != "." { + paths = append([]string{leaf}, paths...) 
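+ // move up one level and loop again; the prepend above keeps the result ordered from root to leaf (e.g. "1/2/3" -> ["1", "2", "3"])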
+ tree, leaf = path.Split(path.Clean(tree)) + } + return +} + +func (f *Fs) getCachedLibraries(ctx context.Context) ([]api.Library, error) { + f.librariesMutex.Lock() + defer f.librariesMutex.Unlock() + + libraries, err := f.libraries.Get(librariesCacheKey, func(key string) (value interface{}, ok bool, error error) { + // Load the libraries if not present in the cache + libraries, err := f.getLibraries(ctx) + if err != nil { + return nil, false, err + } + return libraries, true, nil + }) + if err != nil { + return nil, err + } + // Type assertion + return libraries.([]api.Library), nil +} + +func (f *Fs) newObject(ctx context.Context, remote string, size int64, modTime time.Time) *Object { + libraryName, remotePath := f.splitPath(remote) + libraryID, _ := f.getLibraryID(ctx, libraryName) // If error it means the library does not exist (yet) + + object := &Object{ + fs: f, + remote: remote, + libraryID: libraryID, + pathInLibrary: remotePath, + size: size, + modTime: modTime, + } + return object +} + +// Check the interfaces are satisfied +var ( + _ fs.Fs = &Fs{} + _ fs.Abouter = &Fs{} + _ fs.CleanUpper = &Fs{} + _ fs.Copier = &Fs{} + _ fs.Mover = &Fs{} + _ fs.DirMover = &Fs{} + _ fs.ListRer = &Fs{} + _ fs.Purger = &Fs{} + _ fs.PutStreamer = &Fs{} + _ fs.PublicLinker = &Fs{} + _ fs.UserInfoer = &Fs{} + _ fs.Object = &Object{} + _ fs.IDer = &Object{} +) diff --git a/backend/seafile/seafile_internal_test.go b/backend/seafile/seafile_internal_test.go new file mode 100644 index 000000000..784e8c845 --- /dev/null +++ b/backend/seafile/seafile_internal_test.go @@ -0,0 +1,123 @@ +package seafile + +import ( + "path" + "testing" + + "github.com/stretchr/testify/assert" +) + +type pathData struct { + configLibrary string // Library specified in the config + configRoot string // Root directory specified in the config + argumentPath string // Path given as an argument in the command line + expectedLibrary string + expectedPath string +} + +// Test the method to split a library name and a path +// from a mix of configuration data and path command line argument +func TestSplitPath(t *testing.T) { + testData := []pathData{ + pathData{ + configLibrary: "", + configRoot: "", + argumentPath: "", + expectedLibrary: "", + expectedPath: "", + }, + pathData{ + configLibrary: "", + configRoot: "", + argumentPath: "Library", + expectedLibrary: "Library", + expectedPath: "", + }, + pathData{ + configLibrary: "", + configRoot: "", + argumentPath: path.Join("Library", "path", "to", "file"), + expectedLibrary: "Library", + expectedPath: path.Join("path", "to", "file"), + }, + pathData{ + configLibrary: "Library", + configRoot: "", + argumentPath: "", + expectedLibrary: "Library", + expectedPath: "", + }, + pathData{ + configLibrary: "Library", + configRoot: "", + argumentPath: "path", + expectedLibrary: "Library", + expectedPath: "path", + }, + pathData{ + configLibrary: "Library", + configRoot: "", + argumentPath: path.Join("path", "to", "file"), + expectedLibrary: "Library", + expectedPath: path.Join("path", "to", "file"), + }, + pathData{ + configLibrary: "Library", + configRoot: "root", + argumentPath: "", + expectedLibrary: "Library", + expectedPath: "root", + }, + pathData{ + configLibrary: "Library", + configRoot: path.Join("root", "path"), + argumentPath: "", + expectedLibrary: "Library", + expectedPath: path.Join("root", "path"), + }, + pathData{ + configLibrary: "Library", + configRoot: "root", + argumentPath: "path", + expectedLibrary: "Library", + expectedPath: path.Join("root", "path"), + }, + pathData{ 
+ configLibrary: "Library", + configRoot: "root", + argumentPath: path.Join("path", "to", "file"), + expectedLibrary: "Library", + expectedPath: path.Join("root", "path", "to", "file"), + }, + pathData{ + configLibrary: "Library", + configRoot: path.Join("root", "path"), + argumentPath: path.Join("subpath", "to", "file"), + expectedLibrary: "Library", + expectedPath: path.Join("root", "path", "subpath", "to", "file"), + }, + } + for _, test := range testData { + fs := &Fs{ + libraryName: test.configLibrary, + rootDirectory: test.configRoot, + } + libraryName, path := fs.splitPath(test.argumentPath) + + assert.Equal(t, test.expectedLibrary, libraryName) + assert.Equal(t, test.expectedPath, path) + } +} + +func TestSplitPathIntoSlice(t *testing.T) { + testData := map[string][]string{ + "1": {"1"}, + "/1": {"1"}, + "/1/": {"1"}, + "1/2/3": {"1", "2", "3"}, + } + for input, expected := range testData { + output := splitPath(input) + assert.Equal(t, expected, output) + } +} diff --git a/backend/seafile/seafile_test.go b/backend/seafile/seafile_test.go new file mode 100644 index 000000000..669478548 --- /dev/null +++ b/backend/seafile/seafile_test.go @@ -0,0 +1,17 @@ +// Test Seafile filesystem interface +package seafile_test + +import ( + "testing" + + "github.com/rclone/rclone/backend/seafile" + "github.com/rclone/rclone/fstest/fstests" +) + +// TestIntegration runs integration tests against the remote +func TestIntegration(t *testing.T) { + fstests.Run(t, &fstests.Opt{ + RemoteName: "TestSeafile:", + NilObject: (*seafile.Object)(nil), + }) +} diff --git a/backend/seafile/webapi.go b/backend/seafile/webapi.go new file mode 100644 index 000000000..84f00aa11 --- /dev/null +++ b/backend/seafile/webapi.go @@ -0,0 +1,1083 @@ +package seafile + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "path" + "strings" + + "github.com/pkg/errors" + "github.com/rclone/rclone/backend/seafile/api" + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/lib/readers" + "github.com/rclone/rclone/lib/rest" +) + +// Start of the API URLs +const ( + APIv20 = "api2/repos/" + APIv21 = "api/v2.1/repos/" +) + +// Errors specific to seafile fs +var ( + ErrorInternalDuringUpload = errors.New("Internal server error during file upload") +) + +// ==================== Seafile API ==================== + +func (f *Fs) getAuthorizationToken(ctx context.Context) (string, error) { + // API Socumentation + // https://download.seafile.com/published/web-api/home.md#user-content-Quick%20Start + opts := rest.Opts{ + Method: "POST", + Path: "api2/auth-token/", + ExtraHeaders: map[string]string{"Authorization": ""}, // unset the Authorization for this request + } + + request := api.AuthenticationRequest{ + Username: f.opt.User, + Password: f.opt.Password, + } + result := api.AuthenticationResult{} + + var resp *http.Response + var err error + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(ctx, &opts, &request, &result) + return f.shouldRetryNoReauth(resp, err) + }) + if err != nil { + if resp != nil { + if resp.StatusCode == 403 { + return "", fs.ErrorPermissionDenied + } + } + return "", errors.Wrap(err, "failed to authenticate") + } + if result.Errors != nil && len(result.Errors) > 1 { + return "", errors.New(strings.Join(result.Errors, ", ")) + } + return result.Token, nil +} + +func (f *Fs) getServerInfo(ctx context.Context) (account *api.ServerInfo, err error) { + // API Documentation + // 
https://download.seafile.com/published/web-api/v2.1/server-info.md#user-content-Get%20Server%20Information + opts := rest.Opts{ + Method: "GET", + Path: "api2/server-info/", + } + + result := api.ServerInfo{} + + var resp *http.Response + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) + return f.shouldRetry(ctx, resp, err) + }) + if err != nil { + if resp != nil { + if resp.StatusCode == 403 { + return nil, fs.ErrorPermissionDenied + } + } + return nil, errors.Wrap(err, "failed to get server info") + } + return &result, nil +} + +func (f *Fs) getUserAccountInfo(ctx context.Context) (account *api.AccountInfo, err error) { + // API Documentation + // https://download.seafile.com/published/web-api/v2.1/account.md#user-content-Check%20Account%20Info + opts := rest.Opts{ + Method: "GET", + Path: "api2/account/info/", + } + + result := api.AccountInfo{} + + var resp *http.Response + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) + return f.shouldRetry(ctx, resp, err) + }) + if err != nil { + if resp != nil { + if resp.StatusCode == 403 { + return nil, fs.ErrorPermissionDenied + } + } + return nil, errors.Wrap(err, "failed to get account info") + } + return &result, nil +} + +func (f *Fs) getLibraries(ctx context.Context) ([]api.Library, error) { + // API Documentation + // https://download.seafile.com/published/web-api/v2.1/libraries.md#user-content-List%20Libraries + opts := rest.Opts{ + Method: "GET", + Path: APIv20, + } + + result := make([]api.Library, 1) + + var resp *http.Response + var err error + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) + return f.shouldRetry(ctx, resp, err) + }) + if err != nil { + if resp != nil { + if resp.StatusCode == 403 { + return nil, fs.ErrorPermissionDenied + } + } + return nil, errors.Wrap(err, "failed to get libraries") + } + return result, nil +} + +func (f *Fs) createLibrary(ctx context.Context, libraryName, password string) (library *api.CreateLibrary, err error) { + // API Documentation + // https://download.seafile.com/published/web-api/v2.1/libraries.md#user-content-Create%20Library + opts := rest.Opts{ + Method: "POST", + Path: APIv20, + } + + request := api.CreateLibraryRequest{ + Name: f.opt.Enc.FromStandardName(libraryName), + Description: "Created by rclone", + Password: password, + } + result := &api.CreateLibrary{} + + var resp *http.Response + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(ctx, &opts, &request, &result) + return f.shouldRetry(ctx, resp, err) + }) + if err != nil { + if resp != nil { + if resp.StatusCode == 403 { + return nil, fs.ErrorPermissionDenied + } + } + return nil, errors.Wrap(err, "failed to create library") + } + return result, nil +} + +func (f *Fs) deleteLibrary(ctx context.Context, libraryID string) error { + // API Documentation + // https://download.seafile.com/published/web-api/v2.1/libraries.md#user-content-Create%20Library + opts := rest.Opts{ + Method: "DELETE", + Path: APIv20 + libraryID + "/", + } + + result := "" + + var resp *http.Response + var err error + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) + return f.shouldRetry(ctx, resp, err) + }) + if err != nil { + if resp != nil { + if resp.StatusCode == 403 { + return fs.ErrorPermissionDenied + } + } + return errors.Wrap(err, "failed to delete library") + } + return nil +} + +func (f *Fs) decryptLibrary(ctx context.Context, 
libraryID, password string) error { + // API Documentation + // https://download.seafile.com/published/web-api/v2.1/library-encryption.md#user-content-Decrypt%20Library + if libraryID == "" { + return errors.New("cannot list files without a library") + } + // This is another call that cannot accept a JSON input so we have to build it manually + opts := rest.Opts{ + Method: "POST", + Path: APIv20 + libraryID + "/", + ContentType: "application/x-www-form-urlencoded", + Body: bytes.NewBuffer([]byte("password=" + f.opt.Enc.FromStandardName(password))), + NoResponse: true, + } + var resp *http.Response + var err error + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.Call(ctx, &opts) + return f.shouldRetry(ctx, resp, err) + }) + if err != nil { + if resp != nil { + if resp.StatusCode == 400 { + return errors.New("incorrect password") + } + if resp.StatusCode == 409 { + fs.Debugf(nil, "library is not encrypted") + return nil + } + } + return errors.Wrap(err, "failed to decrypt library") + } + return nil +} + +func (f *Fs) getDirectoryEntriesAPIv21(ctx context.Context, libraryID, dirPath string, recursive bool) ([]api.DirEntry, error) { + // API Documentation + // https://download.seafile.com/published/web-api/v2.1/directories.md#user-content-List%20Items%20in%20Directory + // This is using the undocumented version 2.1 of the API (so we can use the recursive option which is not available in the version 2) + if libraryID == "" { + return nil, errors.New("cannot list files without a library") + } + dirPath = path.Join("/", dirPath) + + recursiveFlag := "0" + if recursive { + recursiveFlag = "1" + } + opts := rest.Opts{ + Method: "GET", + Path: APIv21 + libraryID + "/dir/", + Parameters: url.Values{ + "recursive": {recursiveFlag}, + "p": {f.opt.Enc.FromStandardPath(dirPath)}, + }, + } + result := &api.DirEntries{} + var resp *http.Response + var err error + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) + return f.shouldRetry(ctx, resp, err) + }) + if err != nil { + if resp != nil { + if resp.StatusCode == 404 { + return nil, fs.ErrorDirNotFound + } + if resp.StatusCode == 440 { + // Encrypted library and password not provided + return nil, fs.ErrorPermissionDenied + } + } + return nil, errors.Wrap(err, "failed to get directory contents") + } + + // Clean up encoded names + for index, fileInfo := range result.Entries { + fileInfo.Name = f.opt.Enc.ToStandardName(fileInfo.Name) + fileInfo.Path = f.opt.Enc.ToStandardPath(fileInfo.Path) + result.Entries[index] = fileInfo + } + return result.Entries, nil +} + +func (f *Fs) getDirectoryDetails(ctx context.Context, libraryID, dirPath string) (*api.DirectoryDetail, error) { + // API Documentation + // https://download.seafile.com/published/web-api/v2.1/directories.md#user-content-Get%20Directory%20Detail + if libraryID == "" { + return nil, errors.New("cannot read directory without a library") + } + dirPath = path.Join("/", dirPath) + + opts := rest.Opts{ + Method: "GET", + Path: APIv21 + libraryID + "/dir/detail/", + Parameters: url.Values{"path": {f.opt.Enc.FromStandardPath(dirPath)}}, + } + result := &api.DirectoryDetail{} + var resp *http.Response + var err error + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) + return f.shouldRetry(ctx, resp, err) + }) + if err != nil { + if resp != nil { + if resp.StatusCode == 403 { + return nil, fs.ErrorPermissionDenied + } + if resp.StatusCode == 404 { + return nil, fs.ErrorDirNotFound + } + } + return 
nil, errors.Wrap(err, "failed to get directory details") + } + result.Name = f.opt.Enc.ToStandardName(result.Name) + result.Path = f.opt.Enc.ToStandardPath(result.Path) + return result, nil +} + +// createDir creates a new directory. The API will add a number to the directory name if it already exist +func (f *Fs) createDir(ctx context.Context, libraryID, dirPath string) error { + // API Documentation + // https://download.seafile.com/published/web-api/v2.1/directories.md#user-content-Create%20New%20Directory + if libraryID == "" { + return errors.New("cannot create directory without a library") + } + dirPath = path.Join("/", dirPath) + + // This call *cannot* handle json parameters in the body, so we have to build the request body manually + opts := rest.Opts{ + Method: "POST", + Path: APIv20 + libraryID + "/dir/", + Parameters: url.Values{"p": {f.opt.Enc.FromStandardPath(dirPath)}}, + NoRedirect: true, + ContentType: "application/x-www-form-urlencoded", + Body: bytes.NewBuffer([]byte("operation=mkdir")), + NoResponse: true, + } + + var resp *http.Response + var err error + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.Call(ctx, &opts) + return f.shouldRetry(ctx, resp, err) + }) + if err != nil { + if resp != nil { + if resp.StatusCode == 403 { + return fs.ErrorPermissionDenied + } + } + return errors.Wrap(err, "failed to create directory") + } + return nil +} + +func (f *Fs) renameDir(ctx context.Context, libraryID, dirPath, newName string) error { + // API Documentation + // https://download.seafile.com/published/web-api/v2.1/directories.md#user-content-Rename%20Directory + if libraryID == "" { + return errors.New("cannot rename directory without a library") + } + dirPath = path.Join("/", dirPath) + + // This call *cannot* handle json parameters in the body, so we have to build the request body manually + postParameters := url.Values{ + "operation": {"rename"}, + "newname": {f.opt.Enc.FromStandardPath(newName)}, + } + + opts := rest.Opts{ + Method: "POST", + Path: APIv20 + libraryID + "/dir/", + Parameters: url.Values{"p": {f.opt.Enc.FromStandardPath(dirPath)}}, + ContentType: "application/x-www-form-urlencoded", + Body: bytes.NewBuffer([]byte(postParameters.Encode())), + NoResponse: true, + } + + var resp *http.Response + var err error + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.Call(ctx, &opts) + return f.shouldRetry(ctx, resp, err) + }) + if err != nil { + if resp != nil { + if resp.StatusCode == 403 { + return fs.ErrorPermissionDenied + } + } + return errors.Wrap(err, "failed to rename directory") + } + return nil +} + +func (f *Fs) moveDir(ctx context.Context, srcLibraryID, srcDir, srcName, dstLibraryID, dstPath string) error { + // API Documentation + // https://download.seafile.com/published/web-api/v2.1/files-directories-batch-op.md#user-content-Batch%20Move%20Items%20Synchronously + if srcLibraryID == "" || dstLibraryID == "" || srcName == "" { + return errors.New("libraryID and/or file path argument(s) missing") + } + srcDir = path.Join("/", srcDir) + dstPath = path.Join("/", dstPath) + + opts := rest.Opts{ + Method: "POST", + Path: APIv21 + "sync-batch-move-item/", + NoResponse: true, + } + + request := &api.BatchSourceDestRequest{ + SrcLibraryID: srcLibraryID, + SrcParentDir: f.opt.Enc.FromStandardPath(srcDir), + SrcItems: []string{f.opt.Enc.FromStandardPath(srcName)}, + DstLibraryID: dstLibraryID, + DstParentDir: f.opt.Enc.FromStandardPath(dstPath), + } + + var resp *http.Response + var err error + err = f.pacer.Call(func() (bool, error) 
{ + resp, err = f.srv.CallJSON(ctx, &opts, &request, nil) + return f.shouldRetry(ctx, resp, err) + }) + if err != nil { + if resp != nil { + if resp.StatusCode == 403 { + return fs.ErrorPermissionDenied + } + if resp.StatusCode == 404 { + return fs.ErrorObjectNotFound + } + } + return errors.Wrap(err, fmt.Sprintf("failed to move directory '%s' from '%s' to '%s'", srcName, srcDir, dstPath)) + } + + return nil +} + +func (f *Fs) deleteDir(ctx context.Context, libraryID, filePath string) error { + // API Documentation + // https://download.seafile.com/published/web-api/v2.1/directories.md#user-content-Delete%20Directory + if libraryID == "" { + return errors.New("cannot delete directory without a library") + } + filePath = path.Join("/", filePath) + + opts := rest.Opts{ + Method: "DELETE", + Path: APIv20 + libraryID + "/dir/", + Parameters: url.Values{"p": {f.opt.Enc.FromStandardPath(filePath)}}, + NoResponse: true, + } + + var resp *http.Response + var err error + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(ctx, &opts, nil, nil) + return f.shouldRetry(ctx, resp, err) + }) + if err != nil { + if resp != nil { + if resp.StatusCode == 403 { + return fs.ErrorPermissionDenied + } + } + return errors.Wrap(err, "failed to delete directory") + } + return nil +} + +func (f *Fs) getFileDetails(ctx context.Context, libraryID, filePath string) (*api.FileDetail, error) { + // API Documentation + // https://download.seafile.com/published/web-api/v2.1/file.md#user-content-Get%20File%20Detail + if libraryID == "" { + return nil, errors.New("cannot open file without a library") + } + filePath = path.Join("/", filePath) + + opts := rest.Opts{ + Method: "GET", + Path: APIv20 + libraryID + "/file/detail/", + Parameters: url.Values{"p": {f.opt.Enc.FromStandardPath(filePath)}}, + } + result := &api.FileDetail{} + var resp *http.Response + var err error + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) + return f.shouldRetry(ctx, resp, err) + }) + if err != nil { + if resp != nil { + if resp.StatusCode == 404 { + return nil, fs.ErrorObjectNotFound + } + if resp.StatusCode == 403 { + return nil, fs.ErrorPermissionDenied + } + } + return nil, errors.Wrap(err, "failed to get file details") + } + result.Name = f.opt.Enc.ToStandardName(result.Name) + result.Parent = f.opt.Enc.ToStandardPath(result.Parent) + return result, nil +} + +func (f *Fs) deleteFile(ctx context.Context, libraryID, filePath string) error { + // API Documentation + // https://download.seafile.com/published/web-api/v2.1/file.md#user-content-Delete%20File + if libraryID == "" { + return errors.New("cannot delete file without a library") + } + filePath = path.Join("/", filePath) + + opts := rest.Opts{ + Method: "DELETE", + Path: APIv20 + libraryID + "/file/", + Parameters: url.Values{"p": {f.opt.Enc.FromStandardPath(filePath)}}, + NoResponse: true, + } + err := f.pacer.Call(func() (bool, error) { + resp, err := f.srv.CallJSON(ctx, &opts, nil, nil) + return f.shouldRetry(ctx, resp, err) + }) + if err != nil { + return errors.Wrap(err, "failed to delete file") + } + return nil +} + +func (f *Fs) getDownloadLink(ctx context.Context, libraryID, filePath string) (string, error) { + // API Documentation + // https://download.seafile.com/published/web-api/v2.1/file.md#user-content-Download%20File + if libraryID == "" { + return "", errors.New("cannot download file without a library") + } + filePath = path.Join("/", filePath) + + opts := rest.Opts{ + Method: "GET", + Path: APIv20 + 
libraryID + "/file/", + Parameters: url.Values{"p": {f.opt.Enc.FromStandardPath(filePath)}}, + } + result := "" + var resp *http.Response + var err error + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) + return f.shouldRetry(ctx, resp, err) + }) + if err != nil { + if resp != nil { + if resp.StatusCode == 404 { + return "", fs.ErrorObjectNotFound + } + } + return "", errors.Wrap(err, "failed to get download link") + } + return result, nil +} + +func (f *Fs) download(ctx context.Context, url string, size int64, options ...fs.OpenOption) (io.ReadCloser, error) { + // Check if we need to download partial content + var start, end int64 = 0, size + partialContent := false + for _, option := range options { + switch x := option.(type) { + case *fs.SeekOption: + start = x.Offset + partialContent = true + case *fs.RangeOption: + if x.Start >= 0 { + start = x.Start + if x.End > 0 && x.End < size { + end = x.End + 1 + } + } else { + // {-1, 20} should load the last 20 characters [len-20:len] + start = size - x.End + } + partialContent = true + default: + if option.Mandatory() { + fs.Logf(nil, "Unsupported mandatory option: %v", option) + } + } + } + // Build the http request + opts := rest.Opts{ + Method: "GET", + RootURL: url, + Options: options, + } + var resp *http.Response + var err error + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.Call(ctx, &opts) + return f.shouldRetry(ctx, resp, err) + }) + if err != nil { + if resp != nil { + if resp.StatusCode == 404 { + return nil, fmt.Errorf("file not found '%s'", url) + } + } + return nil, err + } + // Non-encrypted libraries are accepting the HTTP Range header, + // BUT encrypted libraries are simply ignoring it + if partialContent && resp.StatusCode == 200 { + // Partial content was requested through a Range header, but a full content was sent instead + rangeDownloadNotice.Do(func() { + fs.Logf(nil, "%s ignored our request of partial content. This is probably because encrypted libraries are not accepting range requests. Loading this file might be slow!", f.String()) + }) + if start > 0 { + // We need to read and discard the beginning of the data... + _, err = io.CopyN(ioutil.Discard, resp.Body, start) + if err != nil { + return nil, err + } + } + // ... 
and return a limited reader for the remaining of the data + return readers.NewLimitedReadCloser(resp.Body, end-start), nil + } + return resp.Body, nil +} + +func (f *Fs) getUploadLink(ctx context.Context, libraryID string) (string, error) { + // API Documentation + // https://download.seafile.com/published/web-api/v2.1/file-upload.md + if libraryID == "" { + return "", errors.New("cannot upload file without a library") + } + opts := rest.Opts{ + Method: "GET", + Path: APIv20 + libraryID + "/upload-link/", + } + result := "" + var resp *http.Response + var err error + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) + return f.shouldRetry(ctx, resp, err) + }) + if err != nil { + if resp != nil { + if resp.StatusCode == 403 { + return "", fs.ErrorPermissionDenied + } + } + return "", errors.Wrap(err, "failed to get upload link") + } + return result, nil +} + +func (f *Fs) upload(ctx context.Context, in io.Reader, uploadLink, filePath string) (*api.FileDetail, error) { + // API Documentation + // https://download.seafile.com/published/web-api/v2.1/file-upload.md + fileDir, filename := path.Split(filePath) + parameters := url.Values{ + "parent_dir": {"/"}, + "relative_path": {f.opt.Enc.FromStandardPath(fileDir)}, + "need_idx_progress": {"true"}, + "replace": {"1"}, + } + formReader, contentType, _, err := rest.MultipartUpload(in, parameters, "file", f.opt.Enc.FromStandardName(filename)) + if err != nil { + return nil, errors.Wrap(err, "failed to make multipart upload") + } + + opts := rest.Opts{ + Method: "POST", + RootURL: uploadLink, + Body: formReader, + ContentType: contentType, + Parameters: url.Values{"ret-json": {"1"}}, // It needs to be on the url, not in the body parameters + } + result := make([]api.FileDetail, 1) + var resp *http.Response + // If an error occurs during the call, do not attempt to retry: The upload link is single use only + err = f.pacer.CallNoRetry(func() (bool, error) { + resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) + return f.shouldRetryUpload(ctx, resp, err) + }) + if err != nil { + if resp != nil { + if resp.StatusCode == 403 { + return nil, fs.ErrorPermissionDenied + } + if resp.StatusCode == 500 { + // This is a temporary error - we will get a new upload link before retrying + return nil, ErrorInternalDuringUpload + } + } + return nil, errors.Wrap(err, "failed to upload file") + } + if len(result) > 0 { + result[0].Parent = f.opt.Enc.ToStandardPath(result[0].Parent) + result[0].Name = f.opt.Enc.ToStandardName(result[0].Name) + return &result[0], nil + } + return nil, nil +} + +func (f *Fs) listShareLinks(ctx context.Context, libraryID, remote string) ([]api.SharedLink, error) { + // API Documentation + // https://download.seafile.com/published/web-api/v2.1/share-links.md#user-content-List%20Share%20Link%20of%20a%20Folder%20(File) + if libraryID == "" { + return nil, errors.New("cannot get share links without a library") + } + remote = path.Join("/", remote) + + opts := rest.Opts{ + Method: "GET", + Path: "api/v2.1/share-links/", + Parameters: url.Values{"repo_id": {libraryID}, "path": {f.opt.Enc.FromStandardPath(remote)}}, + } + result := make([]api.SharedLink, 1) + var resp *http.Response + var err error + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) + return f.shouldRetry(ctx, resp, err) + }) + if err != nil { + if resp != nil { + if resp.StatusCode == 403 { + return nil, fs.ErrorPermissionDenied + } + if resp.StatusCode == 404 { + return nil, 
fs.ErrorObjectNotFound + } + } + return nil, errors.Wrap(err, "failed to list shared links") + } + return result, nil +} + +// createShareLink will only work with non-encrypted libraries +func (f *Fs) createShareLink(ctx context.Context, libraryID, remote string) (*api.SharedLink, error) { + // API Documentation + // https://download.seafile.com/published/web-api/v2.1/share-links.md#user-content-Create%20Share%20Link + if libraryID == "" { + return nil, errors.New("cannot create a shared link without a library") + } + remote = path.Join("/", remote) + + opts := rest.Opts{ + Method: "POST", + Path: "api/v2.1/share-links/", + } + request := &api.ShareLinkRequest{ + LibraryID: libraryID, + Path: f.opt.Enc.FromStandardPath(remote), + } + result := &api.SharedLink{} + var resp *http.Response + var err error + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(ctx, &opts, &request, &result) + return f.shouldRetry(ctx, resp, err) + }) + if err != nil { + if resp != nil { + if resp.StatusCode == 403 { + return nil, fs.ErrorPermissionDenied + } + if resp.StatusCode == 404 { + return nil, fs.ErrorObjectNotFound + } + } + return nil, errors.Wrap(err, "failed to create a shared link") + } + return result, nil +} + +func (f *Fs) copyFile(ctx context.Context, srcLibraryID, srcPath, dstLibraryID, dstPath string) (*api.FileInfo, error) { + // API Documentation + // https://download.seafile.com/published/web-api/v2.1/file.md#user-content-Copy%20File + // It's using the api/v2.1 which is not in the documentation (as of Apr 2020) but works better than api2 + if srcLibraryID == "" || dstLibraryID == "" { + return nil, errors.New("libraryID and/or file path argument(s) missing") + } + srcPath = path.Join("/", srcPath) + dstPath = path.Join("/", dstPath) + + opts := rest.Opts{ + Method: "POST", + Path: APIv21 + srcLibraryID + "/file/", + Parameters: url.Values{"p": {f.opt.Enc.FromStandardPath(srcPath)}}, + } + request := &api.FileOperationRequest{ + Operation: api.CopyFileOperation, + DestinationLibraryID: dstLibraryID, + DestinationPath: f.opt.Enc.FromStandardPath(dstPath), + } + result := &api.FileInfo{} + var resp *http.Response + var err error + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(ctx, &opts, &request, &result) + return f.shouldRetry(ctx, resp, err) + }) + if err != nil { + if resp != nil { + if resp.StatusCode == 403 { + return nil, fs.ErrorPermissionDenied + } + if resp.StatusCode == 404 { + fs.Debugf(nil, "Copy: %s", err) + return nil, fs.ErrorObjectNotFound + } + } + return nil, errors.Wrap(err, fmt.Sprintf("failed to copy file %s:'%s' to %s:'%s'", srcLibraryID, srcPath, dstLibraryID, dstPath)) + } + return f.decodeFileInfo(result), nil +} + +func (f *Fs) moveFile(ctx context.Context, srcLibraryID, srcPath, dstLibraryID, dstPath string) (*api.FileInfo, error) { + // API Documentation + // https://download.seafile.com/published/web-api/v2.1/file.md#user-content-Move%20File + // It's using the api/v2.1 which is not in the documentation (as of Apr 2020) but works better than api2 + if srcLibraryID == "" || dstLibraryID == "" { + return nil, errors.New("libraryID and/or file path argument(s) missing") + } + srcPath = path.Join("/", srcPath) + dstPath = path.Join("/", dstPath) + + opts := rest.Opts{ + Method: "POST", + Path: APIv21 + srcLibraryID + "/file/", + Parameters: url.Values{"p": {f.opt.Enc.FromStandardPath(srcPath)}}, + } + request := &api.FileOperationRequest{ + Operation: api.MoveFileOperation, + DestinationLibraryID: dstLibraryID, + 
DestinationPath: f.opt.Enc.FromStandardPath(dstPath), + } + result := &api.FileInfo{} + var resp *http.Response + var err error + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(ctx, &opts, &request, &result) + return f.shouldRetry(ctx, resp, err) + }) + if err != nil { + if resp != nil { + if resp.StatusCode == 403 { + return nil, fs.ErrorPermissionDenied + } + if resp.StatusCode == 404 { + fs.Debugf(nil, "Move: %s", err) + return nil, fs.ErrorObjectNotFound + } + } + return nil, errors.Wrap(err, fmt.Sprintf("failed to move file %s:'%s' to %s:'%s'", srcLibraryID, srcPath, dstLibraryID, dstPath)) + } + return f.decodeFileInfo(result), nil +} + +func (f *Fs) renameFile(ctx context.Context, libraryID, filePath, newname string) (*api.FileInfo, error) { + // API Documentation + // https://download.seafile.com/published/web-api/v2.1/file.md#user-content-Rename%20File + // It's using the api/v2.1 which is not in the documentation (as of Apr 2020) but works better than api2 + if libraryID == "" || newname == "" { + return nil, errors.New("libraryID and/or file path argument(s) missing") + } + filePath = path.Join("/", filePath) + + opts := rest.Opts{ + Method: "POST", + Path: APIv21 + libraryID + "/file/", + Parameters: url.Values{"p": {f.opt.Enc.FromStandardPath(filePath)}}, + } + request := &api.FileOperationRequest{ + Operation: api.RenameFileOperation, + NewName: f.opt.Enc.FromStandardName(newname), + } + result := &api.FileInfo{} + var resp *http.Response + var err error + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(ctx, &opts, &request, &result) + return f.shouldRetry(ctx, resp, err) + }) + if err != nil { + if resp != nil { + if resp.StatusCode == 403 { + return nil, fs.ErrorPermissionDenied + } + if resp.StatusCode == 404 { + fs.Debugf(nil, "Rename: %s", err) + return nil, fs.ErrorObjectNotFound + } + } + return nil, errors.Wrap(err, fmt.Sprintf("failed to rename file '%s' to '%s'", filePath, newname)) + } + return f.decodeFileInfo(result), nil +} + +func (f *Fs) decodeFileInfo(input *api.FileInfo) *api.FileInfo { + input.Name = f.opt.Enc.ToStandardName(input.Name) + input.Path = f.opt.Enc.ToStandardPath(input.Path) + return input +} + +func (f *Fs) emptyLibraryTrash(ctx context.Context, libraryID string) error { + // API Documentation + // https://download.seafile.com/published/web-api/v2.1/libraries.md#user-content-Clean%20Library%20Trash + if libraryID == "" { + return errors.New("cannot clean up trash without a library") + } + opts := rest.Opts{ + Method: "DELETE", + Path: APIv21 + libraryID + "/trash/", + NoResponse: true, + } + var resp *http.Response + var err error + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(ctx, &opts, nil, nil) + return f.shouldRetry(ctx, resp, err) + }) + if err != nil { + if resp != nil { + if resp.StatusCode == 403 { + return fs.ErrorPermissionDenied + } + if resp.StatusCode == 404 { + return fs.ErrorObjectNotFound + } + } + return errors.Wrap(err, "failed empty the library trash") + } + return nil +} + +// === API v2 from the official documentation, but that have been replaced by the much better v2.1 (undocumented as of Apr 2020) +// === getDirectoryEntriesAPIv2 is needed to keep compatibility with seafile v6, +// === the others can probably be removed after the API v2.1 is documented + +func (f *Fs) getDirectoryEntriesAPIv2(ctx context.Context, libraryID, dirPath string) ([]api.DirEntry, error) { + // API Documentation + // 
https://download.seafile.com/published/web-api/v2.1/directories.md#user-content-List%20Items%20in%20Directory + if libraryID == "" { + return nil, errors.New("cannot list files without a library") + } + dirPath = path.Join("/", dirPath) + + opts := rest.Opts{ + Method: "GET", + Path: APIv20 + libraryID + "/dir/", + Parameters: url.Values{"p": {f.opt.Enc.FromStandardPath(dirPath)}}, + } + result := make([]api.DirEntry, 1) + var resp *http.Response + var err error + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) + return f.shouldRetry(ctx, resp, err) + }) + if err != nil { + if resp != nil { + if resp.StatusCode == 404 { + return nil, fs.ErrorDirNotFound + } + if resp.StatusCode == 440 { + // Encrypted library and password not provided + return nil, fs.ErrorPermissionDenied + } + } + return nil, errors.Wrap(err, "failed to get directory contents") + } + + // Clean up encoded names + for index, fileInfo := range result { + fileInfo.Name = f.opt.Enc.ToStandardName(fileInfo.Name) + fileInfo.Path = f.opt.Enc.ToStandardPath(fileInfo.Path) + result[index] = fileInfo + } + return result, nil +} + +func (f *Fs) copyFileAPIv2(ctx context.Context, srcLibraryID, srcPath, dstLibraryID, dstPath string) (*api.FileInfo, error) { + // API Documentation + // https://download.seafile.com/published/web-api/v2.1/file.md#user-content-Copy%20File + if srcLibraryID == "" || dstLibraryID == "" { + return nil, errors.New("libraryID and/or file path argument(s) missing") + } + srcPath = path.Join("/", srcPath) + dstPath = path.Join("/", dstPath) + + // Older API does not seem to accept JSON input here either + postParameters := url.Values{ + "operation": {"copy"}, + "dst_repo": {dstLibraryID}, + "dst_dir": {f.opt.Enc.FromStandardPath(dstPath)}, + } + opts := rest.Opts{ + Method: "POST", + Path: APIv20 + srcLibraryID + "/file/", + Parameters: url.Values{"p": {f.opt.Enc.FromStandardPath(srcPath)}}, + ContentType: "application/x-www-form-urlencoded", + Body: bytes.NewBuffer([]byte(postParameters.Encode())), + } + result := &api.FileInfo{} + var resp *http.Response + var err error + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.Call(ctx, &opts) + return f.shouldRetry(ctx, resp, err) + }) + if err != nil { + if resp != nil { + if resp.StatusCode == 403 { + return nil, fs.ErrorPermissionDenied + } + } + return nil, errors.Wrap(err, fmt.Sprintf("failed to copy file %s:'%s' to %s:'%s'", srcLibraryID, srcPath, dstLibraryID, dstPath)) + } + err = rest.DecodeJSON(resp, &result) + if err != nil { + return nil, err + } + return f.decodeFileInfo(result), nil +} + +func (f *Fs) renameFileAPIv2(ctx context.Context, libraryID, filePath, newname string) error { + // API Documentation + // https://download.seafile.com/published/web-api/v2.1/file.md#user-content-Rename%20File + if libraryID == "" || newname == "" { + return errors.New("libraryID and/or file path argument(s) missing") + } + filePath = path.Join("/", filePath) + + // No luck with JSON input with the older api2 + postParameters := url.Values{ + "operation": {"rename"}, + "reloaddir": {"true"}, // This is an undocumented trick to avoid a http code 301 response (found in https://github.com/haiwen/seahub/blob/master/seahub/api2/views.py) + "newname": {f.opt.Enc.FromStandardName(newname)}, + } + + opts := rest.Opts{ + Method: "POST", + Path: APIv20 + libraryID + "/file/", + Parameters: url.Values{"p": {f.opt.Enc.FromStandardPath(filePath)}}, + ContentType: "application/x-www-form-urlencoded", + Body: 
bytes.NewBuffer([]byte(postParameters.Encode())), + NoRedirect: true, + NoResponse: true, + } + var resp *http.Response + var err error + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.Call(ctx, &opts) + return f.shouldRetry(ctx, resp, err) + }) + if err != nil { + if resp != nil { + if resp.StatusCode == 301 { + // This is the normal response from the server + return nil + } + if resp.StatusCode == 403 { + return fs.ErrorPermissionDenied + } + if resp.StatusCode == 404 { + return fs.ErrorObjectNotFound + } + } + return errors.Wrap(err, "failed to rename file") + } + return nil +} diff --git a/bin/make_manual.py b/bin/make_manual.py index c85ef44d4..cf2260afb 100755 --- a/bin/make_manual.py +++ b/bin/make_manual.py @@ -54,6 +54,7 @@ docs = [ "pcloud.md", "premiumizeme.md", "putio.md", + "seafile.md", "sftp.md", "sugarsync.md", "union.md", diff --git a/docs/content/about.md b/docs/content/about.md index c306a052b..9a50cb67f 100644 --- a/docs/content/about.md +++ b/docs/content/about.md @@ -51,6 +51,7 @@ Rclone is a command line program to sync files and directories to and from: * {{< provider name="Rackspace Cloud Files" home="https://www.rackspace.com/cloud/files" config="/swift/" >}} * {{< provider name="rsync.net" home="https://rsync.net/products/rclone.html" config="/sftp/#rsync-net" >}} * {{< provider name="Scaleway" home="https://www.scaleway.com/object-storage/" config="/s3/#scaleway" >}} +* {{< provider name="Seafile" home="https://www.seafile.com/" config="/seafile/" >}} * {{< provider name="SFTP" home="https://en.wikipedia.org/wiki/SSH_File_Transfer_Protocol" config="/sftp/" >}} * {{< provider name="StackPath" home="https://www.stackpath.com/products/object-storage/" config="/s3/#stackpath" >}} * {{< provider name="SugarSync" home="https://sugarsync.com/" config="/sugarsync/" >}} diff --git a/docs/content/docs.md b/docs/content/docs.md index f0e3a98e4..1e81578a0 100644 --- a/docs/content/docs.md +++ b/docs/content/docs.md @@ -50,6 +50,7 @@ See the following for detailed instructions for * [premiumize.me](/premiumizeme/) * [put.io](/putio/) * [QingStor](/qingstor/) + * [Seafile](/seafile/) * [SFTP](/sftp/) * [SugarSync](/sugarsync/) * [Union](/union/) diff --git a/docs/content/overview.md b/docs/content/overview.md index fecb400d0..85544b002 100644 --- a/docs/content/overview.md +++ b/docs/content/overview.md @@ -43,6 +43,7 @@ Here is an overview of the major features of each cloud storage system. | premiumize.me | - | No | Yes | No | R | | put.io | CRC-32 | Yes | No | Yes | R | | QingStor | MD5 | No | No | No | R/W | +| Seafile | - | No | No | No | - | | SFTP | MD5, SHA1 ‡ | Yes | Depends | No | - | | SugarSync | - | No | No | No | - | | WebDAV | MD5, SHA1 ††| Yes ††† | Depends | No | - | @@ -342,6 +343,7 @@ operations more efficient. 
| premiumize.me | Yes | No | Yes | Yes | No | No | No | Yes | Yes | Yes | | put.io | Yes | No | Yes | Yes | Yes | No | Yes | No [#2178](https://github.com/rclone/rclone/issues/2178) | Yes | Yes | | QingStor | No | Yes | No | No | Yes | Yes | No | No [#2178](https://github.com/rclone/rclone/issues/2178) | No | No | +| Seafile | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | | SFTP | No | No | Yes | Yes | No | No | Yes | No [#2178](https://github.com/rclone/rclone/issues/2178) | Yes | Yes | | SugarSync | Yes | Yes | Yes | Yes | No | No | Yes | Yes | No | Yes | | WebDAV | Yes | Yes | Yes | Yes | No | No | Yes ‡ | No [#2178](https://github.com/rclone/rclone/issues/2178) | Yes | Yes | diff --git a/docs/content/seafile.md b/docs/content/seafile.md new file mode 100644 index 000000000..e517b6377 --- /dev/null +++ b/docs/content/seafile.md @@ -0,0 +1,251 @@ +--- +title: "Seafile" +description: "Seafile" +date: "2020-05-02" +--- + +Seafile +---------------------------------------- + +This is a backend for the [Seafile](https://www.seafile.com/) storage service. +It works with both the free community edition, or the professional edition. +Seafile versions 6.x and 7.x are all supported. +Encrypted libraries are also supported. + +### Root mode vs Library mode ### + +There are two distinct modes you can setup your remote: +- you point your remote to the **root of the server**, meaning you don't specify a library during the configuration: +Paths are specified as `remote:library`. You may put subdirectories in too, eg `remote:library/path/to/dir`. +- you point your remote to a specific library during the configuration: +Paths are specified as `remote:path/to/dir`. **This is the recommended mode when using encrypted libraries**. + +### Configuration in root mode ### + +Here is an example of making a seafile configuration. First run + + rclone config + +This will guide you through an interactive setup process. To authenticate +you will need the URL of your server, your email (or username) and your password. + +``` +No remotes found - make a new one +n) New remote +s) Set configuration password +q) Quit config +n/s/q> n +name> seafile +Type of storage to configure. +Enter a string value. Press Enter for the default (""). +Choose a number from below, or type in your own value +[snip] +XX / Seafile + \ "seafile" +[snip] +Storage> seafile +** See help for seafile backend at: https://rclone.org/seafile/ ** + +URL of seafile host to connect to +Enter a string value. Press Enter for the default (""). +Choose a number from below, or type in your own value + 1 / Connect to cloud.seafile.com + \ "https://cloud.seafile.com/" +url> http://my.seafile.server/ +User name +Enter a string value. Press Enter for the default (""). +user> me@example.com +Password +y) Yes type in my own password +g) Generate random password +y/g> y +Enter the password: +password: +Confirm the password: +password: +Name of the library. Leave blank to access all non-encrypted libraries. +Enter a string value. Press Enter for the default (""). +library> +Library password (for encrypted libraries only). Leave blank if you pass it through the command line. +y) Yes type in my own password +g) Generate random password +n) No leave this optional password blank (default) +y/g/n> n +Edit advanced config? 
(y/n) +y) Yes +n) No (default) +y/n> n +Remote config +-------------------- +[seafile] +type = seafile +url = http://my.seafile.server/ +user = me@example.com +password = *** ENCRYPTED *** +-------------------- +y) Yes this is OK (default) +e) Edit this remote +d) Delete this remote +y/e/d> y +``` + +This remote is called `seafile`. It's pointing to the root of your seafile server and can now be used like this + +See all libraries + + rclone lsd seafile: + +Create a new library + + rclone mkdir seafile:library + +List the contents of a library + + rclone ls seafile:library + +Sync `/home/local/directory` to the remote library, deleting any +excess files in the library. + + rclone sync /home/local/directory seafile:library + +### Configuration in library mode ### + +``` +No remotes found - make a new one +n) New remote +s) Set configuration password +q) Quit config +n/s/q> n +name> seafile +Type of storage to configure. +Enter a string value. Press Enter for the default (""). +Choose a number from below, or type in your own value +[snip] +XX / Seafile + \ "seafile" +[snip] +Storage> seafile +** See help for seafile backend at: https://rclone.org/seafile/ ** + +URL of seafile host to connect to +Enter a string value. Press Enter for the default (""). +Choose a number from below, or type in your own value + 1 / Connect to cloud.seafile.com + \ "https://cloud.seafile.com/" +url> http://my.seafile.server/ +User name +Enter a string value. Press Enter for the default (""). +user> me@example.com +Password +y) Yes type in my own password +g) Generate random password +y/g> y +Enter the password: +password: +Confirm the password: +password: +Name of the library. Leave blank to access all non-encrypted libraries. +Enter a string value. Press Enter for the default (""). +library> My Library +Library password (for encrypted libraries only). Leave blank if you pass it through the command line. +y) Yes type in my own password +g) Generate random password +n) No leave this optional password blank (default) +y/g/n> n +Edit advanced config? (y/n) +y) Yes +n) No (default) +y/n> n +Remote config +-------------------- +[seafile] +type = seafile +url = http://my.seafile.server/ +user = me@example.com +password = *** ENCRYPTED *** +library = My Library +-------------------- +y) Yes this is OK (default) +e) Edit this remote +d) Delete this remote +y/e/d> y +``` + +You specified `My Library` during the configuration. The root of the remote is pointing at the +root of the library `My Library`: + +See all files in the library: + + rclone lsd seafile: + +Create a new directory inside the library + + rclone mkdir seafile:directory + +List the contents of a directory + + rclone ls seafile:directory + +Sync `/home/local/directory` to the remote library, deleting any +excess files in the library. + + rclone sync /home/local/directory seafile: + + +### --fast-list ### + +Seafile version 7+ supports `--fast-list` which allows you to use fewer +transactions in exchange for more memory. See the [rclone +docs](/docs/#fast-list) for more details. +Please note this is not supported on seafile server version 6.x + + +#### Restricted filename characters + +In addition to the [default restricted characters set](/overview/#restricted-characters) +the following characters are also replaced: + +| Character | Value | Replacement | +| --------- |:-----:|:-----------:| +| / | 0x2F | / | +| " | 0x22 | " | +| \ | 0x5C | \ | + +Invalid UTF-8 bytes will also be [replaced](/overview/#invalid-utf8), +as they can't be used in JSON strings. 
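
The replacement table above is handled by rclone's `lib/encoder` package: the backend runs every name and path through `FromStandardName`/`FromStandardPath` before it reaches the Seafile API, and back through `ToStandardName`/`ToStandardPath` on the way out. The snippet below is only a rough sketch of that round trip; the flag set is an assumption derived from the table above, not the exact set registered in `seafile.go` (which is outside this excerpt).

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/lib/encoder"
)

// Hypothetical encoder matching the replacement table above;
// the exact flags registered by the seafile backend may differ.
const enc = encoder.MultiEncoder(
	encoder.EncodeSlash |
		encoder.EncodeDoubleQuote |
		encoder.EncodeBackSlash |
		encoder.EncodeInvalidUtf8)

func main() {
	// A file name containing characters from the table above
	name := `report "v1\final".txt`

	wire := enc.FromStandardName(name) // what would be sent to the API
	back := enc.ToStandardName(wire)   // decoded again on the way back

	fmt.Println(wire)         // shows the full-width substitutes ＂ and ＼
	fmt.Println(back == name) // true: the mapping is reversible
}
```
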
+ +### Seafile and rclone link ### + +Rclone supports generating share links for non-encrypted libraries only. +They can either be for a file or a directory: + +``` +rclone link seafile:seafile-tutorial.doc +http://my.seafile.server/f/fdcd8a2f93f84b8b90f4/ + +``` + +or if run on a directory you will get: + +``` +rclone link seafile:dir +http://my.seafile.server/d/9ea2455f6f55478bbb0d/ +``` + +Please note a share link is unique for each file or directory. If you run a link command on a file/dir +that has already been shared, you will get the exact same link. + +### Compatibility ### + +It has been actively tested using the [seafile docker image](https://github.com/haiwen/seafile-docker) of these versions: +- 6.3.4 community edition +- 7.0.5 community edition +- 7.1.3 community edition + +Versions below 6.0 are not supported. +Versions between 6.0 and 6.3 haven't been tested and might not work properly. + + + + + diff --git a/docs/layouts/chrome/navbar.html b/docs/layouts/chrome/navbar.html index 39f1daedd..61bf57ba3 100644 --- a/docs/layouts/chrome/navbar.html +++ b/docs/layouts/chrome/navbar.html @@ -86,6 +86,7 @@
  • pCloud
  • premiumize.me
  • put.io
  • + Seafile
  • SFTP
  • SugarSync
  • Union (merge backends)
  • diff --git a/fstest/test_all/config.yaml b/fstest/test_all/config.yaml index 9a4671cd2..41ccdf3c5 100644 --- a/fstest/test_all/config.yaml +++ b/fstest/test_all/config.yaml @@ -264,3 +264,14 @@ backends: remote: "TestMailru:" subdir: false fastlist: false + - backend: "seafile" + remote: "TestSeafileV6:" + fastlist: false + ignore: + - TestIntegration/FsMkdir/FsPutFiles/FsDirMove + - backend: "seafile" + remote: "TestSeafile:" + fastlist: true + - backend: "seafile" + remote: "TestSeafileEncrypted:" + fastlist: true diff --git a/fstest/testserver/init.d/TestSeafile b/fstest/testserver/init.d/TestSeafile new file mode 100755 index 000000000..14449e04b --- /dev/null +++ b/fstest/testserver/init.d/TestSeafile @@ -0,0 +1,60 @@ +#!/bin/bash + +set -e + +# environment variables passed on docker-compose +export NAME=seafile7 +export MYSQL_ROOT_PASSWORD=pixenij4zacoguq0kopamid6 +export SEAFILE_ADMIN_EMAIL=seafile@rclone.org +export SEAFILE_ADMIN_PASSWORD=pixenij4zacoguq0kopamid6 +export SEAFILE_IP=127.0.0.1 +export SEAFILE_PORT=8087 +export SEAFILE_TEST_DATA=${SEAFILE_TEST_DATA:-/tmp/seafile-test-data} +export SEAFILE_VERSION=latest + +# make sure the data directory exists +mkdir -p ${SEAFILE_TEST_DATA}/${NAME} + +# docker-compose project directory +COMPOSE_DIR=$(dirname "$0")/seafile + +start() { + docker-compose --project-directory ${COMPOSE_DIR} --project-name ${NAME} --file ${COMPOSE_DIR}/docker-compose.yml up -d + + # it takes some time for the database to be created + sleep 60 + + # authentication token answer should be like: {"token":"dbf58423f1632b5b679a13b0929f1d0751d9250c"} + TOKEN=`curl --silent \ + --data-urlencode username=${SEAFILE_ADMIN_EMAIL} -d password=${SEAFILE_ADMIN_PASSWORD} \ + http://${SEAFILE_IP}:${SEAFILE_PORT}/api2/auth-token/ \ + | sed 's/^{"token":"\(.*\)"}$/\1/'` + + # create default library + curl -X POST -H "Authorization: Token ${TOKEN}" "http://${SEAFILE_IP}:${SEAFILE_PORT}/api2/default-repo/" + + echo _connect=${SEAFILE_IP}:${SEAFILE_PORT} + echo type=seafile + echo url=http://${SEAFILE_IP}:${SEAFILE_PORT}/ + echo user=${SEAFILE_ADMIN_EMAIL} + echo pass=$(rclone obscure ${SEAFILE_ADMIN_PASSWORD}) + echo library=My Library +} + +stop() { + if status ; then + docker-compose --project-directory ${COMPOSE_DIR} --project-name ${NAME} --file ${COMPOSE_DIR}/docker-compose.yml down + fi +} + +status() { + if docker ps --format "{{.Names}}" | grep ^${NAME}_seafile_1$ >/dev/null ; then + echo "$NAME running" + else + echo "$NAME not running" + return 1 + fi + return 0 +} + +. 
$(dirname "$0")/run.bash diff --git a/fstest/testserver/init.d/TestSeafileEncrypted b/fstest/testserver/init.d/TestSeafileEncrypted new file mode 100755 index 000000000..75db82c63 --- /dev/null +++ b/fstest/testserver/init.d/TestSeafileEncrypted @@ -0,0 +1,65 @@ +#!/bin/bash + +set -e + +# local variables +TEST_LIBRARY=Encrypted +TEST_LIBRARY_PASSWORD=SecretKey + +# environment variables passed on docker-compose +export NAME=seafile7encrypted +export MYSQL_ROOT_PASSWORD=pixenij4zacoguq0kopamid6 +export SEAFILE_ADMIN_EMAIL=seafile@rclone.org +export SEAFILE_ADMIN_PASSWORD=pixenij4zacoguq0kopamid6 +export SEAFILE_IP=127.0.0.1 +export SEAFILE_PORT=8088 +export SEAFILE_TEST_DATA=${SEAFILE_TEST_DATA:-/tmp/seafile-test-data} +export SEAFILE_VERSION=latest + +# make sure the data directory exists +mkdir -p ${SEAFILE_TEST_DATA}/${NAME} + +# docker-compose project directory +COMPOSE_DIR=$(dirname "$0")/seafile + +start() { + docker-compose --project-directory ${COMPOSE_DIR} --project-name ${NAME} --file ${COMPOSE_DIR}/docker-compose.yml up -d + + # it takes some time for the database to be created + sleep 60 + + # authentication token answer should be like: {"token":"dbf58423f1632b5b679a13b0929f1d0751d9250c"} + TOKEN=`curl --silent \ + --data-urlencode username=${SEAFILE_ADMIN_EMAIL} -d password=${SEAFILE_ADMIN_PASSWORD} \ + http://${SEAFILE_IP}:${SEAFILE_PORT}/api2/auth-token/ \ + | sed 's/^{"token":"\(.*\)"}$/\1/'` + + # create encrypted library + curl -X POST -d "name=${TEST_LIBRARY}&passwd=${TEST_LIBRARY_PASSWORD}" -H "Authorization: Token ${TOKEN}" "http://${SEAFILE_IP}:${SEAFILE_PORT}/api2/repos/" + + echo _connect=${SEAFILE_IP}:${SEAFILE_PORT} + echo type=seafile + echo url=http://${SEAFILE_IP}:${SEAFILE_PORT}/ + echo user=${SEAFILE_ADMIN_EMAIL} + echo pass=$(rclone obscure ${SEAFILE_ADMIN_PASSWORD}) + echo library=${TEST_LIBRARY} + echo library_key=$(rclone obscure ${TEST_LIBRARY_PASSWORD}) +} + +stop() { + if status ; then + docker-compose --project-directory ${COMPOSE_DIR} --project-name ${NAME} --file ${COMPOSE_DIR}/docker-compose.yml down + fi +} + +status() { + if docker ps --format "{{.Names}}" | grep ^${NAME}_seafile_1$ >/dev/null ; then + echo "$NAME running" + else + echo "$NAME not running" + return 1 + fi + return 0 +} + +. $(dirname "$0")/run.bash diff --git a/fstest/testserver/init.d/TestSeafileV6 b/fstest/testserver/init.d/TestSeafileV6 new file mode 100755 index 000000000..bb6cddf1e --- /dev/null +++ b/fstest/testserver/init.d/TestSeafileV6 @@ -0,0 +1,48 @@ +#!/bin/bash + +set -e + +# local variables +NAME=seafile6 +SEAFILE_IP=127.0.0.1 +SEAFILE_PORT=8086 +SEAFILE_ADMIN_EMAIL=seafile@rclone.org +SEAFILE_ADMIN_PASSWORD=qebiwob7wafixif8sojiboj4 +SEAFILE_TEST_DATA=${SEAFILE_TEST_DATA:-/tmp/seafile-test-data} +SEAFILE_VERSION=latest + +. 
$(dirname "$0")/docker.bash + +start() { + # make sure the data directory exists + mkdir -p ${SEAFILE_TEST_DATA}/${NAME} + + docker run --rm -d --name $NAME \ + -e SEAFILE_SERVER_HOSTNAME=${SEAFILE_IP}:${SEAFILE_PORT} \ + -e SEAFILE_ADMIN_EMAIL=${SEAFILE_ADMIN_EMAIL} \ + -e SEAFILE_ADMIN_PASSWORD=${SEAFILE_ADMIN_PASSWORD} \ + -v ${SEAFILE_TEST_DATA}/${NAME}:/shared \ + -p ${SEAFILE_IP}:${SEAFILE_PORT}:80 \ + seafileltd/seafile:${SEAFILE_VERSION} + + # it takes some time for the database to be created + sleep 60 + + # authentication token answer should be like: {"token":"dbf58423f1632b5b679a13b0929f1d0751d9250c"} + TOKEN=`curl --silent \ + --data-urlencode username=${SEAFILE_ADMIN_EMAIL} -d password=${SEAFILE_ADMIN_PASSWORD} \ + http://${SEAFILE_IP}:${SEAFILE_PORT}/api2/auth-token/ \ + | sed 's/^{"token":"\(.*\)"}$/\1/'` + + # create default library + curl -X POST -H "Authorization: Token ${TOKEN}" "http://${SEAFILE_IP}:${SEAFILE_PORT}/api2/default-repo/" + + echo _connect=${SEAFILE_IP}:${SEAFILE_PORT} + echo type=seafile + echo url=http://${SEAFILE_IP}:${SEAFILE_PORT}/ + echo user=${SEAFILE_ADMIN_EMAIL} + echo pass=$(rclone obscure ${SEAFILE_ADMIN_PASSWORD}) + echo library=My Library +} + +. $(dirname "$0")/run.bash diff --git a/fstest/testserver/init.d/seafile/docker-compose.yml b/fstest/testserver/init.d/seafile/docker-compose.yml new file mode 100644 index 000000000..55d8e2f69 --- /dev/null +++ b/fstest/testserver/init.d/seafile/docker-compose.yml @@ -0,0 +1,31 @@ +version: '2.0' +services: + db: + image: mariadb:10.1 + environment: + - MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD} + - MYSQL_LOG_CONSOLE=true + volumes: + - ${SEAFILE_TEST_DATA}/${NAME}/seafile-mysql/db:/var/lib/mysql + + memcached: + image: memcached:1.5.6 + entrypoint: memcached -m 256 + + seafile: + image: seafileltd/seafile-mc:${SEAFILE_VERSION} + ports: + - "${SEAFILE_IP}:${SEAFILE_PORT}:80" + volumes: + - ${SEAFILE_TEST_DATA}/${NAME}/seafile-data:/shared + environment: + - DB_HOST=db + - DB_ROOT_PASSWD=${MYSQL_ROOT_PASSWORD} + - TIME_ZONE=Etc/UTC + - SEAFILE_ADMIN_EMAIL=${SEAFILE_ADMIN_EMAIL} + - SEAFILE_ADMIN_PASSWORD=${SEAFILE_ADMIN_PASSWORD} + - SEAFILE_SERVER_LETSENCRYPT=false + - SEAFILE_SERVER_HOSTNAME=${SEAFILE_IP}:${SEAFILE_PORT} + depends_on: + - db + - memcached
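
Both the backend (`getAuthorizationToken` in `webapi.go`) and the init scripts above rely on the same two-step token flow: POST the credentials to `api2/auth-token/`, then send `Authorization: Token <token>` with every subsequent request. The standalone sketch below reproduces that flow using only the standard library, roughly what the `curl` calls in the scripts do. The URL and credentials are the illustrative `TestSeafile` defaults from above, not values the backend itself hard-codes, and `api2/server-info/` is just used here as a convenient authenticated endpoint.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"net/url"
	"strings"
)

func main() {
	// Placeholder test-server address and credentials (see TestSeafile above)
	base := "http://127.0.0.1:8087/"
	form := url.Values{
		"username": {"seafile@rclone.org"},
		"password": {"pixenij4zacoguq0kopamid6"},
	}

	// Step 1: exchange credentials for a token at api2/auth-token/
	resp, err := http.Post(base+"api2/auth-token/",
		"application/x-www-form-urlencoded", strings.NewReader(form.Encode()))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var auth struct {
		Token string `json:"token"` // answer is like {"token":"dbf58423..."}
	}
	if err := json.NewDecoder(resp.Body).Decode(&auth); err != nil {
		log.Fatal(err)
	}

	// Step 2: use the token on an authenticated endpoint
	req, err := http.NewRequest("GET", base+"api2/server-info/", nil)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Authorization", "Token "+auth.Token)

	info, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer info.Body.Close()

	var server map[string]interface{}
	if err := json.NewDecoder(info.Body).Decode(&server); err != nil {
		log.Fatal(err)
	}
	fmt.Println("authenticated, server info:", server)
}
```

With the docker-compose stack above running, this prints the server version information, confirming the token flow works before pointing the rclone remote at the test instance.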