diff --git a/README.md b/README.md index 0f0737469..06c88f78a 100644 --- a/README.md +++ b/README.md @@ -28,6 +28,7 @@ Rclone is a command line program to sync files and directories to and from * Mega * Microsoft Azure Blob Storage * Microsoft OneDrive + * OpenDrive * Openstack Swift / Rackspace cloud files / Memset Memstore / OVH / Oracle Cloud Storage * pCloud * QingStor diff --git a/backend/all/all.go b/backend/all/all.go index 63a1a8bf3..0088aafa2 100644 --- a/backend/all/all.go +++ b/backend/all/all.go @@ -18,6 +18,7 @@ import ( _ "github.com/ncw/rclone/backend/local" _ "github.com/ncw/rclone/backend/mega" _ "github.com/ncw/rclone/backend/onedrive" + _ "github.com/ncw/rclone/backend/opendrive" _ "github.com/ncw/rclone/backend/pcloud" _ "github.com/ncw/rclone/backend/qingstor" _ "github.com/ncw/rclone/backend/s3" diff --git a/opendrive/opendrive.go b/backend/opendrive/opendrive.go similarity index 55% rename from opendrive/opendrive.go rename to backend/opendrive/opendrive.go index 8091a5cff..0b2a0f9aa 100644 --- a/opendrive/opendrive.go +++ b/backend/opendrive/opendrive.go @@ -5,16 +5,22 @@ import ( "io" "mime/multipart" "net/http" + "net/url" + "path" "strconv" "strings" "time" "fmt" - "github.com/ncw/rclone/dircache" "github.com/ncw/rclone/fs" - "github.com/ncw/rclone/pacer" - "github.com/ncw/rclone/rest" + "github.com/ncw/rclone/fs/config/obscure" + "github.com/ncw/rclone/fs/fserrors" + "github.com/ncw/rclone/fs/fshttp" + "github.com/ncw/rclone/fs/hash" + "github.com/ncw/rclone/lib/dircache" + "github.com/ncw/rclone/lib/pacer" + "github.com/ncw/rclone/lib/rest" "github.com/pkg/errors" ) @@ -31,7 +37,7 @@ const ( func init() { fs.Register(&fs.RegInfo{ Name: "opendrive", - Description: "OpenDRIVE", + Description: "OpenDrive", NewFs: NewFs, Options: []fs.Option{{ Name: "username", @@ -47,6 +53,7 @@ func init() { // Fs represents a remote b2 server type Fs struct { name string // name of this remote + root string // the path we are working on features 
*fs.Features // optional features username string // account name password string // auth key0 @@ -72,6 +79,14 @@ func parsePath(path string) (root string) { return } +// mimics url.PathEscape which only available from go 1.8 +func pathEscape(path string) string { + u := url.URL{ + Path: path, + } + return u.EscapedPath() +} + // ------------------------------------------------------------ // Name of the remote (as passed into NewFs) @@ -81,12 +96,12 @@ func (f *Fs) Name() string { // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { - return "/" + return f.root } // String converts this Fs to a string func (f *Fs) String() string { - return "OpenDRIVE" + return fmt.Sprintf("OpenDrive root '%s'", f.root) } // Features returns the optional features of this Fs @@ -95,13 +110,8 @@ func (f *Fs) Features() *fs.Features { } // Hashes returns the supported hash sets. -func (f *Fs) Hashes() fs.HashSet { - return fs.HashSet(fs.HashMD5) -} - -// List walks the path returning iles and directories into out -func (f *Fs) List(out fs.ListOpts, dir string) { - f.dirCache.List(f, out, dir) +func (f *Fs) Hashes() hash.Set { + return hash.Set(hash.MD5) } // NewFs contstructs an Fs from the path, bucket:path @@ -112,7 +122,7 @@ func NewFs(name, root string) (fs.Fs, error) { if username == "" { return nil, errors.New("username not found") } - password, err := fs.Reveal(fs.ConfigFileGet(name, "password")) + password, err := obscure.Reveal(fs.ConfigFileGet(name, "password")) if err != nil { return nil, errors.New("password coudl not revealed") } @@ -120,14 +130,15 @@ func NewFs(name, root string) (fs.Fs, error) { return nil, errors.New("password not found") } - fs.Debugf(nil, "OpenDRIVE-user: %s", username) - fs.Debugf(nil, "OpenDRIVE-pass: %s", password) + fs.Debugf(nil, "OpenDrive-user: %s", username) + fs.Debugf(nil, "OpenDrive-pass: %s", password) f := &Fs{ name: name, username: username, password: password, - srv: 
rest.NewClient(fs.Config.Client()).SetErrorHandler(errorHandler), + root: root, + srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler), pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant), } @@ -151,8 +162,8 @@ func NewFs(name, root string) (fs.Fs, error) { if err != nil { return nil, errors.Wrap(err, "failed to create session") } - - fs.Debugf(nil, "Starting OpenDRIVE session with ID: %s", f.session.SessionID) + resp.Body.Close() + fs.Debugf(nil, "Starting OpenDrive session with ID: %s", f.session.SessionID) f.features = (&fs.Features{ReadMimeType: true, WriteMimeType: true}).Fill(f) @@ -163,6 +174,7 @@ func NewFs(name, root string) (fs.Fs, error) { newRoot, remote := dircache.SplitPath(root) newF := *f newF.dirCache = dircache.New(newRoot, "0", &newF) + newF.root = newRoot // Make new Fs which is the parent err = newF.dirCache.FindRoot(false) @@ -184,6 +196,14 @@ func NewFs(name, root string) (fs.Fs, error) { return f, nil } +// rootSlash returns root with a slash on if it is empty, otherwise empty string +func (f *Fs) rootSlash() string { + if f.root == "" { + return f.root + } + return f.root + "/" +} + // errorHandler parses a non 2xx error response into an error func errorHandler(resp *http.Response) error { // Decode error response @@ -205,7 +225,7 @@ func errorHandler(resp *http.Response) error { return nil } -// Mkdir creates the bucket if it doesn't exist +// Mkdir creates the folder if it doesn't exist func (f *Fs) Mkdir(dir string) error { fs.Debugf(nil, "Mkdir(\"%s\")", dir) err := f.dirCache.FindRoot(true) @@ -218,42 +238,278 @@ func (f *Fs) Mkdir(dir string) error { return err } -// Rmdir deletes the bucket if the fs is at the root +// deleteObject removes an object by ID +func (f *Fs) deleteObject(id string) error { + return f.pacer.Call(func() (bool, error) { + removeDirData := removeFolder{SessionID: f.session.SessionID, FolderID: id} + opts := rest.Opts{ + Method: "POST", + 
NoResponse: true, + Path: "/folder/remove.json", + } + resp, err := f.srv.CallJSON(&opts, &removeDirData, nil) + return f.shouldRetry(resp, err) + }) +} + +// purgeCheck remotes the root directory, if check is set then it +// refuses to do so if it has anything in +func (f *Fs) purgeCheck(dir string, check bool) error { + root := path.Join(f.root, dir) + if root == "" { + return errors.New("can't purge root directory") + } + dc := f.dirCache + err := dc.FindRoot(false) + if err != nil { + return err + } + rootID, err := dc.FindDir(dir, false) + if err != nil { + return err + } + item, err := f.readMetaDataForFolderID(rootID) + if err != nil { + return err + } + if check && len(item.Files) != 0 { + return errors.New("folder not empty") + } + err = f.deleteObject(rootID) + if err != nil { + return err + } + f.dirCache.FlushDir(dir) + if err != nil { + return err + } + return nil +} + +// Rmdir deletes the root folder // // Returns an error if it isn't empty func (f *Fs) Rmdir(dir string) error { - fs.Debugf(nil, "Rmdir(\"%s\")", dir) - // if f.root != "" || dir != "" { - // return nil - // } - // opts := rest.Opts{ - // Method: "POST", - // Path: "/b2_delete_bucket", - // } - // bucketID, err := f.getBucketID() - // if err != nil { - // return err - // } - // var request = api.DeleteBucketRequest{ - // ID: bucketID, - // AccountID: f.info.AccountID, - // } - // var response api.Bucket - // err = f.pacer.Call(func() (bool, error) { - // resp, err := f.srv.CallJSON(&opts, &request, &response) - // return f.shouldRetry(resp, err) - // }) - // if err != nil { - // return errors.Wrap(err, "failed to delete bucket") - // } - // f.clearBucketID() - // f.clearUploadURL() - return nil + fs.Debugf(nil, "Rmdir(\"%s\")", path.Join(f.root, dir)) + return f.purgeCheck(dir, true) } // Precision of the remote func (f *Fs) Precision() time.Duration { - return time.Millisecond + return time.Second +} + +// Copy src to this remote using server side copy operations. 
+// +// This is stored with the remote path given +// +// It returns the destination Object and a possible error +// +// Will only be called if src.Fs().Name() == f.Name() +// +// If it isn't possible then return fs.ErrorCantCopy +func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { + fs.Debugf(nil, "Copy(%v)", remote) + srcObj, ok := src.(*Object) + if !ok { + fs.Debugf(src, "Can't copy - not same remote type") + return nil, fs.ErrorCantCopy + } + err := srcObj.readMetaData() + if err != nil { + return nil, err + } + + srcPath := srcObj.fs.rootSlash() + srcObj.remote + dstPath := f.rootSlash() + remote + if strings.ToLower(srcPath) == strings.ToLower(dstPath) { + return nil, errors.Errorf("Can't copy %q -> %q as are same name when lowercase", srcPath, dstPath) + } + + // Create temporary object + dstObj, _, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size) + if err != nil { + return nil, err + } + fs.Debugf(nil, "...%#v\n...%#v", remote, directoryID) + + // Copy the object + var resp *http.Response + response := copyFileResponse{} + err = f.pacer.Call(func() (bool, error) { + copyFileData := copyFile{ + SessionID: f.session.SessionID, + SrcFileID: srcObj.id, + DstFolderID: directoryID, + Move: "false", + OverwriteIfExists: "true", + } + opts := rest.Opts{ + Method: "POST", + Path: "/file/move_copy.json", + } + resp, err = f.srv.CallJSON(&opts, ©FileData, &response) + return f.shouldRetry(resp, err) + }) + if err != nil { + return nil, err + } + resp.Body.Close() + + size, _ := strconv.ParseInt(response.Size, 10, 64) + dstObj.id = response.FileID + dstObj.size = size + + return dstObj, nil +} + +// Move src to this remote using server side move operations. 
+// +// This is stored with the remote path given +// +// It returns the destination Object and a possible error +// +// Will only be called if src.Fs().Name() == f.Name() +// +// If it isn't possible then return fs.ErrorCantMove +func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { + fs.Debugf(nil, "Move(%v)", remote) + srcObj, ok := src.(*Object) + if !ok { + fs.Debugf(src, "Can't move - not same remote type") + return nil, fs.ErrorCantCopy + } + err := srcObj.readMetaData() + if err != nil { + return nil, err + } + + // Create temporary object + dstObj, _, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size) + if err != nil { + return nil, err + } + + // Copy the object + var resp *http.Response + response := copyFileResponse{} + err = f.pacer.Call(func() (bool, error) { + copyFileData := copyFile{ + SessionID: f.session.SessionID, + SrcFileID: srcObj.id, + DstFolderID: directoryID, + Move: "true", + OverwriteIfExists: "true", + } + opts := rest.Opts{ + Method: "POST", + Path: "/file/move_copy.json", + } + resp, err = f.srv.CallJSON(&opts, ©FileData, &response) + return f.shouldRetry(resp, err) + }) + if err != nil { + return nil, err + } + resp.Body.Close() + + size, _ := strconv.ParseInt(response.Size, 10, 64) + dstObj.id = response.FileID + dstObj.size = size + + return dstObj, nil +} + +// DirMove moves src, srcRemote to this remote at dstRemote +// using server side move operations. 
+// +// Will only be called if src.Fs().Name() == f.Name() +// +// If it isn't possible then return fs.ErrorCantDirMove +// +// If destination exists then return fs.ErrorDirExists +func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) { + fs.Debugf(nil, "DirMove(%v)", src.Root()) + srcFs, ok := src.(*Fs) + if !ok { + fs.Debugf(src, "DirMove error: not same remote type") + return fs.ErrorCantDirMove + } + srcPath := path.Join(srcFs.root, srcRemote) + + // Refuse to move to or from the root + if srcPath == "" { + fs.Debugf(src, "DirMove error: Can't move root") + return errors.New("can't move root directory") + } + + // find the root src directory + err = srcFs.dirCache.FindRoot(false) + if err != nil { + return err + } + + // Find ID of src parent + srcDirectoryID, err := srcFs.dirCache.FindDir(srcRemote, false) + if err != nil { + return err + } + + // Find ID of dst parent, creating subdirs if necessary + findPath := dstRemote + if dstRemote == "" { + findPath = f.root + } + dstDirectoryID, err := f.dirCache.FindDir(findPath, true) + if err != nil { + return err + } + + // Check destination does not exist + if dstRemote != "" { + _, err = f.dirCache.FindDir(dstRemote, false) + if err == fs.ErrorDirNotFound { + // OK + } else if err != nil { + return err + } else { + return fs.ErrorDirExists + } + } + + var resp *http.Response + response := moveFolderResponse{} + err = f.pacer.Call(func() (bool, error) { + moveFolderData := moveFolder{ + SessionID: f.session.SessionID, + FolderID: srcDirectoryID, + DstFolderID: dstDirectoryID, + Move: "true", + } + opts := rest.Opts{ + Method: "POST", + Path: "/folder/move_copy.json", + } + resp, err = f.srv.CallJSON(&opts, &moveFolderData, &response) + return f.shouldRetry(resp, err) + }) + if err != nil { + fs.Debugf(src, "DirMove error %v", err) + return err + } + resp.Body.Close() + + srcFs.dirCache.FlushDir(srcRemote) + return nil +} + +// Purge deletes all the files and the container +// +// Optional 
interface: Only implement this if you have a way of +// deleting all the files quicker than just running Remove() on the +// result of List() +func (f *Fs) Purge() error { + return f.purgeCheck("", false) } // Return an Object from a path @@ -270,6 +526,7 @@ func (f *Fs) newObjectWithInfo(remote string, file *File) (fs.Object, error) { id: file.FileID, modTime: time.Unix(file.DateModified, 0), size: file.Size, + md5: file.FileHash, } } else { o = &Object{ @@ -282,14 +539,13 @@ func (f *Fs) newObjectWithInfo(remote string, file *File) (fs.Object, error) { return nil, err } } - fs.Debugf(nil, "%v", o) return o, nil } // NewObject finds the Object at remote. If it can't be found // it returns the error fs.ErrorObjectNotFound. func (f *Fs) NewObject(remote string) (fs.Object, error) { - fs.Debugf(nil, "NewObject(\"%s\"", remote) + fs.Debugf(nil, "NewObject(\"%s\")", remote) return f.newObjectWithInfo(remote, nil) } @@ -305,6 +561,7 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje if err != nil { return nil, leaf, directoryID, err } + fs.Debugf(nil, "\n...leaf %#v\n...id %#v", leaf, directoryID) // Temporary Object under construction o = &Object{ fs: f, @@ -313,6 +570,27 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje return o, leaf, directoryID, nil } +// readMetaDataForPath reads the metadata from the path +func (f *Fs) readMetaDataForFolderID(id string) (info *FolderList, err error) { + var resp *http.Response + opts := rest.Opts{ + Method: "GET", + Path: "/folder/list.json/" + f.session.SessionID + "/" + id, + } + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(&opts, nil, &info) + return f.shouldRetry(resp, err) + }) + if err != nil { + return nil, err + } + if resp != nil { + resp.Body.Close() + } + + return info, err +} + // Put the object into the bucket // // Copy the reader in to the new object which is returned @@ -325,10 +603,36 @@ func (f *Fs) Put(in io.Reader, src 
fs.ObjectInfo, options ...fs.OpenOption) (fs. fs.Debugf(nil, "Put(%s)", remote) - o, _, _, err := f.createObject(remote, modTime, size) + o, leaf, directoryID, err := f.createObject(remote, modTime, size) if err != nil { return nil, err } + + if "" == o.id { + o.readMetaData() + } + + if "" == o.id { + // We need to create a ID for this file + var resp *http.Response + response := createFileResponse{} + err := o.fs.pacer.Call(func() (bool, error) { + createFileData := createFile{SessionID: o.fs.session.SessionID, FolderID: directoryID, Name: replaceReservedChars(leaf)} + opts := rest.Opts{ + Method: "POST", + Path: "/upload/create_file.json", + } + resp, err = o.fs.srv.CallJSON(&opts, &createFileData, &response) + return o.fs.shouldRetry(resp, err) + }) + if err != nil { + return nil, errors.Wrap(err, "failed to create file") + } + resp.Body.Close() + + o.id = response.FileID + } + return o, o.Update(in, src, options...) } @@ -347,43 +651,39 @@ var retryErrorCodes = []int{ // shouldRetry returns a boolean as to whether this resp and err // deserve to be retried. It returns the err as a convenience func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) { - // if resp != nil { - // if resp.StatusCode == 401 { - // f.tokenRenewer.Invalidate() - // fs.Debugf(f, "401 error received - invalidating token") - // return true, err - // } - // // Work around receiving this error sporadically on authentication - // // - // // HTTP code 403: "403 Forbidden", reponse body: {"message":"Authorization header requires 'Credential' parameter. Authorization header requires 'Signature' parameter. Authorization header requires 'SignedHeaders' parameter. Authorization header requires existence of either a 'X-Amz-Date' or a 'Date' header. 
Authorization=Bearer"} - // if resp.StatusCode == 403 && strings.Contains(err.Error(), "Authorization header requires") { - // fs.Debugf(f, "403 \"Authorization header requires...\" error received - retry") - // return true, err - // } - // } - return fs.ShouldRetry(err) || fs.ShouldRetryHTTP(resp, retryErrorCodes), err + return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err } -// DirCacher methos +// DirCacher methods // CreateDir makes a directory with pathID as parent and name leaf func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) { - fs.Debugf(nil, "CreateDir(\"%s\", \"%s\")", pathID, leaf) - // //fmt.Printf("CreateDir(%q, %q)\n", pathID, leaf) - // folder := acd.FolderFromId(pathID, f.c.Nodes) - // var resp *http.Response - // var info *acd.Folder - // err = f.pacer.Call(func() (bool, error) { - // info, resp, err = folder.CreateFolder(leaf) - // return f.shouldRetry(resp, err) - // }) - // if err != nil { - // //fmt.Printf("...Error %v\n", err) - // return "", err - // } - // //fmt.Printf("...Id %q\n", *info.Id) - // return *info.Id, nil - return "", fmt.Errorf("CreateDir not implemented") + fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, replaceReservedChars(leaf)) + var resp *http.Response + response := createFolderResponse{} + err = f.pacer.Call(func() (bool, error) { + createDirData := createFolder{ + SessionID: f.session.SessionID, + FolderName: replaceReservedChars(leaf), + FolderSubParent: pathID, + FolderIsPublic: 0, + FolderPublicUpl: 0, + FolderPublicDisplay: 0, + FolderPublicDnl: 0, + } + opts := rest.Opts{ + Method: "POST", + Path: "/folder.json", + } + resp, err = f.srv.CallJSON(&opts, &createDirData, &response) + return f.shouldRetry(resp, err) + }) + if err != nil { + return "", err + } + resp.Body.Close() + + return response.FolderID, nil } // FindLeaf finds a directory of name leaf in the folder with ID pathID @@ -391,7 +691,7 @@ func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, 
found bool, err er fs.Debugf(nil, "FindLeaf(\"%s\", \"%s\")", pathID, leaf) if pathID == "0" && leaf == "" { - fs.Debugf(nil, "Found OpenDRIVE root") + fs.Debugf(nil, "Found OpenDrive root") // that's the root directory return pathID, true, nil } @@ -410,8 +710,10 @@ func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err er if err != nil { return "", false, errors.Wrap(err, "failed to get folder list") } + resp.Body.Close() for _, folder := range folderList.Folders { + folder.Name = restoreReservedChars(folder.Name) fs.Debugf(nil, "Folder: %s (%s)", folder.Name, folder.FolderID) if leaf == folder.Name { @@ -423,55 +725,64 @@ func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err er return "", false, nil } -// ListDir reads the directory specified by the job into out, returning any more jobs -func (f *Fs) ListDir(out fs.ListOpts, job dircache.ListDirJob) (jobs []dircache.ListDirJob, err error) { - fs.Debugf(nil, "ListDir(%v, %v)", out, job) - // get the folderIDs +// List the objects and directories in dir into entries. The +// entries can be returned in any order but should be for a +// complete directory. +// +// dir should be "" to list the root, and should not have +// trailing slashes. +// +// This should return ErrDirNotFound if the directory isn't +// found. 
+func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { + fs.Debugf(nil, "List(%v)", dir) + err = f.dirCache.FindRoot(false) + if err != nil { + return nil, err + } + directoryID, err := f.dirCache.FindDir(dir, false) + if err != nil { + return nil, err + } + var resp *http.Response + opts := rest.Opts{ + Method: "GET", + Path: "/folder/list.json/" + f.session.SessionID + "/" + directoryID, + } folderList := FolderList{} err = f.pacer.Call(func() (bool, error) { - opts := rest.Opts{ - Method: "GET", - Path: "/folder/list.json/" + f.session.SessionID + "/" + job.DirID, - } resp, err = f.srv.CallJSON(&opts, nil, &folderList) return f.shouldRetry(resp, err) }) if err != nil { return nil, errors.Wrap(err, "failed to get folder list") } + resp.Body.Close() for _, folder := range folderList.Folders { + folder.Name = restoreReservedChars(folder.Name) fs.Debugf(nil, "Folder: %s (%s)", folder.Name, folder.FolderID) - remote := job.Path + folder.Name - if out.IncludeDirectory(remote) { - dir := &fs.Dir{ - Name: remote, - Bytes: -1, - Count: -1, - } - dir.When = time.Unix(int64(folder.DateModified), 0) - if out.AddDir(dir) { - continue - } - if job.Depth > 0 { - jobs = append(jobs, dircache.ListDirJob{DirID: folder.FolderID, Path: remote + "/", Depth: job.Depth - 1}) - } - } + remote := path.Join(dir, folder.Name) + // cache the directory ID for later lookups + f.dirCache.Put(remote, folder.FolderID) + d := fs.NewDir(remote, time.Unix(int64(folder.DateModified), 0)).SetID(folder.FolderID) + d.SetItems(int64(folder.ChildFolders)) + entries = append(entries, d) } for _, file := range folderList.Files { + file.Name = restoreReservedChars(file.Name) fs.Debugf(nil, "File: %s (%s)", file.Name, file.FileID) - remote := job.Path + file.Name + remote := path.Join(dir, file.Name) o, err := f.newObjectWithInfo(remote, &file) if err != nil { - out.SetError(err) - continue + return nil, err } - out.Add(o) + entries = append(entries, o) } - return jobs, nil + return entries, 
nil } // ------------------------------------------------------------ @@ -495,9 +806,9 @@ func (o *Object) Remote() string { } // Hash returns the Md5sum of an object returning a lowercase hex string -func (o *Object) Hash(t fs.HashType) (string, error) { - if t != fs.HashMD5 { - return "", fs.ErrHashUnsupported +func (o *Object) Hash(t hash.Type) (string, error) { + if t != hash.MD5 { + return "", hash.ErrUnsupported } return o.md5, nil } @@ -518,20 +829,42 @@ func (o *Object) ModTime() time.Time { // SetModTime sets the modification time of the local fs object func (o *Object) SetModTime(modTime time.Time) error { - // FIXME not implemented - return fs.ErrorCantSetModTime + fs.Debugf(nil, "SetModTime(%v)", modTime.String()) + opts := rest.Opts{ + Method: "PUT", + NoResponse: true, + Path: "/file/filesettings.json", + } + update := modTimeFile{SessionID: o.fs.session.SessionID, FileID: o.id, FileModificationTime: strconv.FormatInt(modTime.Unix(), 10)} + err := o.fs.pacer.Call(func() (bool, error) { + resp, err := o.fs.srv.CallJSON(&opts, &update, nil) + return o.fs.shouldRetry(resp, err) + }) + + o.modTime = modTime + + return err } // Open an object for read func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { fs.Debugf(nil, "Open(\"%v\")", o.remote) + opts := fs.OpenOptionHeaders(options) + offset := "0" + + if "" != opts["Range"] { + parts := strings.Split(opts["Range"], "=") + parts = strings.Split(parts[1], "-") + offset = parts[0] + } + // get the folderIDs var resp *http.Response err = o.fs.pacer.Call(func() (bool, error) { opts := rest.Opts{ Method: "GET", - Path: "/download/file.json/" + o.id + "?session_id=" + o.fs.session.SessionID, + Path: "/download/file.json/" + o.id + "?session_id=" + o.fs.session.SessionID + "&offset=" + offset, } resp, err = o.fs.srv.Call(&opts) return o.fs.shouldRetry(resp, err) @@ -546,7 +879,15 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { // Remove an object func 
(o *Object) Remove() error { fs.Debugf(nil, "Remove(\"%s\")", o.id) - return fmt.Errorf("Remove not implemented") + return o.fs.pacer.Call(func() (bool, error) { + opts := rest.Opts{ + Method: "DELETE", + NoResponse: true, + Path: "/file.json/" + o.fs.session.SessionID + "/" + o.id, + } + resp, err := o.fs.srv.Call(&opts) + return o.fs.shouldRetry(resp, err) + }) } // Storable returns a boolean showing whether this object storable @@ -560,48 +901,26 @@ func (o *Object) Storable() bool { func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { size := src.Size() modTime := src.ModTime() - fs.Debugf(nil, "%d %d", size, modTime) fs.Debugf(nil, "Update(\"%s\", \"%s\")", o.id, o.remote) - var err error - if "" == o.id { - // We need to create a ID for this file - var resp *http.Response - response := createFileResponse{} - err = o.fs.pacer.Call(func() (bool, error) { - createFileData := createFile{SessionID: o.fs.session.SessionID, FolderID: "0", Name: o.remote} - opts := rest.Opts{ - Method: "POST", - Path: "/upload/create_file.json", - } - resp, err = o.fs.srv.CallJSON(&opts, &createFileData, &response) - return o.fs.shouldRetry(resp, err) - }) - if err != nil { - return errors.Wrap(err, "failed to create file") - } - - o.id = response.FileID - } - fmt.Println(o.id) - // Open file for upload var resp *http.Response openResponse := openUploadResponse{} - err = o.fs.pacer.Call(func() (bool, error) { + err := o.fs.pacer.Call(func() (bool, error) { openUploadData := openUpload{SessionID: o.fs.session.SessionID, FileID: o.id, Size: size} - fs.Debugf(nil, "PreOpen: %s", openUploadData) + fs.Debugf(nil, "PreOpen: %#v", openUploadData) opts := rest.Opts{ Method: "POST", Path: "/upload/open_file_upload.json", } - resp, err = o.fs.srv.CallJSON(&opts, &openUploadData, &openResponse) + resp, err := o.fs.srv.CallJSON(&opts, &openUploadData, &openResponse) return o.fs.shouldRetry(resp, err) }) if err != nil { return errors.Wrap(err, "failed to 
create file") } - fs.Debugf(nil, "PostOpen: %s", openResponse) + // resp.Body.Close() + fs.Debugf(nil, "PostOpen: %#v", openResponse) // 1 MB chunks size chunkSize := int64(1024 * 1024 * 10) @@ -685,19 +1004,17 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio if err != nil { return errors.Wrap(err, "failed to create file") } - - fmt.Println(resp.Body) resp.Body.Close() chunkCounter++ chunkOffset += currentChunkSize } - // CLose file for upload + // Close file for upload closeResponse := closeUploadResponse{} err = o.fs.pacer.Call(func() (bool, error) { closeUploadData := closeUpload{SessionID: o.fs.session.SessionID, FileID: o.id, Size: size, TempLocation: openResponse.TempLocation} - fs.Debugf(nil, "PreClose: %s", closeUploadData) + fs.Debugf(nil, "PreClose: %#v", closeUploadData) opts := rest.Opts{ Method: "POST", Path: "/upload/close_file_upload.json", @@ -708,29 +1025,33 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio if err != nil { return errors.Wrap(err, "failed to create file") } - fs.Debugf(nil, "PostClose: %s", closeResponse) + resp.Body.Close() + fs.Debugf(nil, "PostClose: %#v", closeResponse) - // file := acd.File{Node: o.info} - // var info *acd.File - // var resp *http.Response - // var err error - // err = o.fs.pacer.CallNoRetry(func() (bool, error) { - // start := time.Now() - // o.fs.tokenRenewer.Start() - // info, resp, err = file.Overwrite(in) - // o.fs.tokenRenewer.Stop() - // var ok bool - // ok, info, err = o.fs.checkUpload(resp, in, src, info, err, time.Since(start)) - // if ok { - // return false, nil - // } - // return o.fs.shouldRetry(resp, err) - // }) - // if err != nil { - // return err - // } - // o.info = info.Node - // return nil + o.id = closeResponse.FileID + o.size = closeResponse.Size + + // Set the mod time now and read metadata + err = o.SetModTime(modTime) + if err != nil { + return err + } + + // Set permissions + err = o.fs.pacer.Call(func() (bool, error) 
{ + update := permissions{SessionID: o.fs.session.SessionID, FileID: o.id, FileIsPublic: 0} + fs.Debugf(nil, "Permissions : %#v", update) + opts := rest.Opts{ + Method: "POST", + NoResponse: true, + Path: "/file/access.json", + } + resp, err = o.fs.srv.CallJSON(&opts, &update, nil) + return o.fs.shouldRetry(resp, err) + }) + if err != nil { + return err + } return nil } @@ -748,7 +1069,7 @@ func (o *Object) readMetaData() (err error) { err = o.fs.pacer.Call(func() (bool, error) { opts := rest.Opts{ Method: "GET", - Path: "/folder/itembyname.json/" + o.fs.session.SessionID + "/" + directoryID + "?name=" + leaf, + Path: "/folder/itembyname.json/" + o.fs.session.SessionID + "/" + directoryID + "?name=" + pathEscape(replaceReservedChars(leaf)), } resp, err = o.fs.srv.CallJSON(&opts, nil, &folderList) return o.fs.shouldRetry(resp, err) @@ -756,6 +1077,7 @@ func (o *Object) readMetaData() (err error) { if err != nil { return errors.Wrap(err, "failed to get folder list") } + resp.Body.Close() if len(folderList.Files) == 0 { return fs.ErrorObjectNotFound @@ -764,7 +1086,7 @@ func (o *Object) readMetaData() (err error) { leafFile := folderList.Files[0] o.id = leafFile.FileID o.modTime = time.Unix(leafFile.DateModified, 0) - o.md5 = "" + o.md5 = leafFile.FileHash o.size = leafFile.Size return nil diff --git a/backend/opendrive/opendrive_test.go b/backend/opendrive/opendrive_test.go new file mode 100644 index 000000000..66a0a3533 --- /dev/null +++ b/backend/opendrive/opendrive_test.go @@ -0,0 +1,17 @@ +// Test Opendrive filesystem interface +package opendrive_test + +import ( + "testing" + + "github.com/ncw/rclone/backend/opendrive" + "github.com/ncw/rclone/fstest/fstests" +) + +// TestIntegration runs integration tests against the remote +func TestIntegration(t *testing.T) { + fstests.Run(t, &fstests.Opt{ + RemoteName: "TestOpenDrive:", + NilObject: (*opendrive.Object)(nil), + }) +} diff --git a/backend/opendrive/replace.go b/backend/opendrive/replace.go new file mode 100644 
index 000000000..b6c64b8df --- /dev/null +++ b/backend/opendrive/replace.go @@ -0,0 +1,84 @@ +/* +Translate file names for OpenDrive + +OpenDrive reserved characters + +The following characters are OpenDrive reserved characters, and can't +be used in OpenDrive folder and file names. + +\\ / : * ? \" < > |" + +*/ + +package opendrive + +import ( + "regexp" + "strings" +) + +// charMap holds replacements for characters +// +// Onedrive has a restricted set of characters compared to other cloud +// storage systems, so we to map these to the FULLWIDTH unicode +// equivalents +// +// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS +var ( + charMap = map[rune]rune{ + '\\': '\', // FULLWIDTH REVERSE SOLIDUS + '*': '*', // FULLWIDTH ASTERISK + '<': '<', // FULLWIDTH LESS-THAN SIGN + '>': '>', // FULLWIDTH GREATER-THAN SIGN + '?': '?', // FULLWIDTH QUESTION MARK + ':': ':', // FULLWIDTH COLON + '|': '|', // FULLWIDTH VERTICAL LINE + '#': '#', // FULLWIDTH NUMBER SIGN + '%': '%', // FULLWIDTH PERCENT SIGN + '"': '"', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved + '.': '.', // FULLWIDTH FULL STOP + '~': '~', // FULLWIDTH TILDE + ' ': '␠', // SYMBOL FOR SPACE + } + invCharMap map[rune]rune + fixEndingInPeriod = regexp.MustCompile(`\.(/|$)`) + fixStartingWithTilde = regexp.MustCompile(`(/|^)~`) + fixStartingWithSpace = regexp.MustCompile(`(/|^) `) +) + +func init() { + // Create inverse charMap + invCharMap = make(map[rune]rune, len(charMap)) + for k, v := range charMap { + invCharMap[v] = k + } +} + +// replaceReservedChars takes a path and substitutes any reserved +// characters in it +func replaceReservedChars(in string) string { + // Folder names can't end with a period '.' 
+ in = fixEndingInPeriod.ReplaceAllString(in, string(charMap['.'])+"$1") + // OneDrive for Business file or folder names cannot begin with a tilde '~' + in = fixStartingWithTilde.ReplaceAllString(in, "$1"+string(charMap['~'])) + // Apparently file names can't start with space either + in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' '])) + // Replace reserved characters + return strings.Map(func(c rune) rune { + if replacement, ok := charMap[c]; ok && c != '.' && c != '~' && c != ' ' { + return replacement + } + return c + }, in) +} + +// restoreReservedChars takes a path and undoes any substitutions +// made by replaceReservedChars +func restoreReservedChars(in string) string { + return strings.Map(func(c rune) rune { + if replacement, ok := invCharMap[c]; ok { + return replacement + } + return c + }, in) +} diff --git a/backend/opendrive/replace_test.go b/backend/opendrive/replace_test.go new file mode 100644 index 000000000..777fdec31 --- /dev/null +++ b/backend/opendrive/replace_test.go @@ -0,0 +1,30 @@ +package opendrive + +import "testing" + +func TestReplace(t *testing.T) { + for _, test := range []struct { + in string + out string + }{ + {"", ""}, + {"abc 123", "abc 123"}, + {`\*<>?:|#%".~`, `\*<>?:|#%".~`}, + {`\*<>?:|#%".~/\*<>?:|#%".~`, `\*<>?:|#%".~/\*<>?:|#%".~`}, + {" leading space", "␠leading space"}, + {"~leading tilde", "~leading tilde"}, + {"trailing dot.", "trailing dot."}, + {" leading space/ leading space/ leading space", "␠leading space/␠leading space/␠leading space"}, + {"~leading tilde/~leading tilde/~leading tilde", "~leading tilde/~leading tilde/~leading tilde"}, + {"trailing dot./trailing dot./trailing dot.", "trailing dot./trailing dot./trailing dot."}, + } { + got := replaceReservedChars(test.in) + if got != test.out { + t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got) + } + got2 := restoreReservedChars(got) + if got2 != test.in { + t.Errorf("restoreReservedChars(%q) want %q got %q", got, 
test.in, got2) + } + } +} diff --git a/opendrive/types.go b/backend/opendrive/types.go similarity index 56% rename from opendrive/types.go rename to backend/opendrive/types.go index 009083c01..e09cd0853 100644 --- a/opendrive/types.go +++ b/backend/opendrive/types.go @@ -1,5 +1,9 @@ package opendrive +import ( + "encoding/json" +) + // Account describes a OpenDRIVE account type Account struct { Username string `json:"username"` @@ -11,20 +15,20 @@ type UserSessionInfo struct { Username string `json:"username"` Password string `json:"passwd"` - SessionID string `json:"SessionID"` - UserName string `json:"UserName"` - UserFirstName string `json:"UserFirstName"` - UserLastName string `json:"UserLastName"` - AccType string `json:"AccType"` - UserLang string `json:"UserLang"` - UserID string `json:"UserID"` - IsAccountUser int `json:"IsAccountUser"` - DriveName string `json:"DriveName"` - UserLevel string `json:"UserLevel"` - UserPlan string `json:"UserPlan"` - FVersioning string `json:"FVersioning"` - UserDomain string `json:"UserDomain"` - PartnerUsersDomain string `json:"PartnerUsersDomain"` + SessionID string `json:"SessionID"` + UserName string `json:"UserName"` + UserFirstName string `json:"UserFirstName"` + UserLastName string `json:"UserLastName"` + AccType string `json:"AccType"` + UserLang string `json:"UserLang"` + UserID string `json:"UserID"` + IsAccountUser json.RawMessage `json:"IsAccountUser"` + DriveName string `json:"DriveName"` + UserLevel string `json:"UserLevel"` + UserPlan string `json:"UserPlan"` + FVersioning string `json:"FVersioning"` + UserDomain string `json:"UserDomain"` + PartnerUsersDomain string `json:"PartnerUsersDomain"` } // FolderList describes a OpenDRIVE listing @@ -52,9 +56,48 @@ type Folder struct { Encrypted string `json:"Encrypted"` } +type createFolder struct { + SessionID string `json:"session_id"` + FolderName string `json:"folder_name"` + FolderSubParent string `json:"folder_sub_parent"` + FolderIsPublic int64 
`json:"folder_is_public"` // (0 = private, 1 = public, 2 = hidden) + FolderPublicUpl int64 `json:"folder_public_upl"` // (0 = disabled, 1 = enabled) + FolderPublicDisplay int64 `json:"folder_public_display"` // (0 = disabled, 1 = enabled) + FolderPublicDnl int64 `json:"folder_public_dnl"` // (0 = disabled, 1 = enabled). +} + +type createFolderResponse struct { + FolderID string `json:"FolderID"` + Name string `json:"Name"` + DateCreated int `json:"DateCreated"` + DirUpdateTime int `json:"DirUpdateTime"` + Access int `json:"Access"` + DateModified int `json:"DateModified"` + Shared string `json:"Shared"` + Description string `json:"Description"` + Link string `json:"Link"` +} + +type moveFolder struct { + SessionID string `json:"session_id"` + FolderID string `json:"folder_id"` + DstFolderID string `json:"dst_folder_id"` + Move string `json:"move"` +} + +type moveFolderResponse struct { + FolderID string `json:"FolderID"` +} + +type removeFolder struct { + SessionID string `json:"session_id"` + FolderID string `json:"folder_id"` +} + // File describes a OpenDRIVE file type File struct { FileID string `json:"FileId"` + FileHash string `json:"FileHash"` Name string `json:"Name"` GroupID int `json:"GroupID"` Extension string `json:"Extension"` @@ -74,6 +117,19 @@ type File struct { EditOnline int `json:"EditOnline"` } +type copyFile struct { + SessionID string `json:"session_id"` + SrcFileID string `json:"src_file_id"` + DstFolderID string `json:"dst_folder_id"` + Move string `json:"move"` + OverwriteIfExists string `json:"overwrite_if_exists"` +} + +type copyFileResponse struct { + FileID string `json:"FileID"` + Size string `json:"Size"` +} + type createFile struct { SessionID string `json:"session_id"` FolderID string `json:"folder_id"` @@ -102,6 +158,12 @@ type createFileResponse struct { RequireHashOnly int `json:"RequireHashOnly"` } +type modTimeFile struct { + SessionID string `json:"session_id"` + FileID string `json:"file_id"` + FileModificationTime string 
`json:"file_modification_time"` +} + type openUpload struct { SessionID string `json:"session_id"` FileID string `json:"file_id"` @@ -124,6 +186,14 @@ type closeUpload struct { } type closeUploadResponse struct { + FileID string `json:"FileID"` FileHash string `json:"FileHash"` Size int64 `json:"Size"` } + +type permissions struct { + SessionID string `json:"session_id"` + FileID string `json:"file_id"` + FileIsPublic int64 `json:"file_ispublic"` +} + diff --git a/bin/make_manual.py b/bin/make_manual.py index e928c37b7..b52605f39 100755 --- a/bin/make_manual.py +++ b/bin/make_manual.py @@ -38,6 +38,7 @@ docs = [ "mega.md", "azureblob.md", "onedrive.md", + "opendrive.md", "qingstor.md", "swift.md", "pcloud.md", diff --git a/cmd/cmd.go b/cmd/cmd.go index 5aae802a8..ca7dcb47d 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -85,6 +85,7 @@ from various cloud storage systems and using file transfer services, such as: * Mega * Microsoft Azure Blob Storage * Microsoft OneDrive + * OpenDrive * Openstack Swift / Rackspace cloud files / Memset Memstore * pCloud * QingStor diff --git a/docs/content/about.md b/docs/content/about.md index 7ca33f71d..86cf43bbf 100644 --- a/docs/content/about.md +++ b/docs/content/about.md @@ -34,6 +34,7 @@ Rclone is a command line program to sync files and directories to and from: * {{< provider name="Minio" home="https://www.minio.io/" config="/s3/#minio" >}} * {{< provider name="Nextloud" home="https://nextcloud.com/" config="/webdav/#nextcloud" >}} * {{< provider name="OVH" home="https://www.ovh.co.uk/public-cloud/storage/object-storage/" config="/swift/" >}} +* {{< provider name="OpenDrive" home="https://www.opendrive.com/" config="/opendrive/" >}} * {{< provider name="Openstack Swift" home="https://docs.openstack.org/swift/latest/" config="/swift/" >}} * {{< provider name="Oracle Cloud Storage" home="https://cloud.oracle.com/storage-opc" config="/swift/" >}} * {{< provider name="ownCloud" home="https://owncloud.org/" config="/webdav/#owncloud" 
>}} diff --git a/docs/content/docs.md b/docs/content/docs.md index 2b4338d6a..11dfad151 100644 --- a/docs/content/docs.md +++ b/docs/content/docs.md @@ -37,6 +37,7 @@ See the following for detailed instructions for * [Microsoft Azure Blob Storage](/azureblob/) * [Microsoft OneDrive](/onedrive/) * [Openstack Swift / Rackspace Cloudfiles / Memset Memstore](/swift/) + * [OpenDrive](/opendrive/) * [Pcloud](/pcloud/) * [QingStor](/qingstor/) * [SFTP](/sftp/) diff --git a/docs/content/opendrive.md b/docs/content/opendrive.md new file mode 100644 index 000000000..e40d0184a --- /dev/null +++ b/docs/content/opendrive.md @@ -0,0 +1,114 @@ +--- +title: "OpenDrive" +description: "Rclone docs for OpenDrive" +date: "2017-08-07" +--- + + OpenDrive +------------------------------------ + +Paths are specified as `remote:path` + +Paths may be as deep as required, eg `remote:directory/subdirectory`. + +Here is an example of how to make a remote called `remote`. First run: + + rclone config + +This will guide you through an interactive setup process: + +``` +n) New remote +d) Delete remote +q) Quit config +e/n/d/q> n +name> remote +Type of storage to configure. 
+Choose a number from below, or type in your own value + 1 / Amazon Drive + \ "amazon cloud drive" + 2 / Amazon S3 (also Dreamhost, Ceph, Minio) + \ "s3" + 3 / Backblaze B2 + \ "b2" + 4 / Dropbox + \ "dropbox" + 5 / Encrypt/Decrypt a remote + \ "crypt" + 6 / Google Cloud Storage (this is not Google Drive) + \ "google cloud storage" + 7 / Google Drive + \ "drive" + 8 / Hubic + \ "hubic" + 9 / Local Disk + \ "local" +10 / OpenDrive + \ "opendrive" +11 / Microsoft OneDrive + \ "onedrive" +12 / Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH) + \ "swift" +13 / SSH/SFTP Connection + \ "sftp" +14 / Yandex Disk + \ "yandex" +Storage> 10 +Username +username> +Password +y) Yes type in my own password +g) Generate random password +y/g> y +Enter the password: +password: +Confirm the password: +password: +-------------------- +[remote] +username = +password = *** ENCRYPTED *** +-------------------- +y) Yes this is OK +e) Edit this remote +d) Delete this remote +y/e/d> y +``` + +List directories in top level of your OpenDrive + + rclone lsd remote: + +List all the files in your OpenDrive + + rclone ls remote: + +To copy a local directory to an OpenDrive directory called backup + + rclone copy /home/source remote:backup + +### Modified time and MD5SUMs ### + +OpenDrive allows modification times to be set on objects accurate to 1 +second. These will be used to detect whether objects need syncing or +not. + +### Deleting files ### + +Any files you delete with rclone will end up in the trash. Amazon +don't provide an API to permanently delete files, nor to empty the +trash, so you will have to do that with one of Amazon's apps or via +the OpenDrive website. As of November 17, 2016, files are +automatically deleted by Amazon from the trash after 30 days. + +### Limitations ### + +Note that OpenDrive is case insensitive so you can't have a +file called "Hello.doc" and one called "hello.doc". + +There are quite a few characters that can't be in OpenDrive file +names. 
These can't occur on Windows platforms, but on non-Windows +platforms they are common. Rclone will map these names to and from an +identical looking unicode equivalent. For example if a file has a `?` +in it, it will be mapped to `？` instead. + diff --git a/docs/content/overview.md b/docs/content/overview.md index 40e95e0f2..09120def8 100644 --- a/docs/content/overview.md +++ b/docs/content/overview.md @@ -30,6 +30,7 @@ Here is an overview of the major features of each cloud storage system. | Mega | - | No | No | Yes | - | | Microsoft Azure Blob Storage | MD5 | Yes | No | No | R/W | | Microsoft OneDrive | SHA1 ‡‡ | Yes | Yes | No | R | +| OpenDrive | - | Yes | Yes | No | - | | Openstack Swift | MD5 | Yes | No | No | R/W | | pCloud | MD5, SHA1 | Yes | No | No | W | | QingStor | MD5 | No | No | No | R/W | @@ -139,6 +140,7 @@ operations more efficient. | Mega | Yes | No | Yes | Yes | No | No | No | No [#2178](https://github.com/ncw/rclone/issues/2178) | Yes | | Microsoft Azure Blob Storage | Yes | Yes | No | No | No | Yes | No | No [#2178](https://github.com/ncw/rclone/issues/2178) | No | | Microsoft OneDrive | Yes | Yes | Yes | No [#197](https://github.com/ncw/rclone/issues/197) | No [#575](https://github.com/ncw/rclone/issues/575) | No | No | No [#2178](https://github.com/ncw/rclone/issues/2178) | Yes | +| OpenDrive | Yes | No | No | No | No | No | No | No | No | | Openstack Swift | Yes † | Yes | No | No | No | Yes | Yes | No [#2178](https://github.com/ncw/rclone/issues/2178) | Yes | | pCloud | Yes | Yes | Yes | Yes | Yes | No | No | No [#2178](https://github.com/ncw/rclone/issues/2178) | Yes | | QingStor | No | Yes | No | No | No | Yes | No | No [#2178](https://github.com/ncw/rclone/issues/2178) | No | diff --git a/docs/layouts/chrome/navbar.html b/docs/layouts/chrome/navbar.html index 03f58c92b..ee07b5621 100644 --- a/docs/layouts/chrome/navbar.html +++ b/docs/layouts/chrome/navbar.html @@ -69,6 +69,7 @@
  • Mega
  • Microsoft Azure Blob Storage
  • Microsoft OneDrive
  • +
  • OpenDrive
  • QingStor
  • Openstack Swift
  • pCloud