opendrive: fill out the functionality #1026

* Add Mkdir, Rmdir, Purge, Delete, SetModTime, Copy, Move, DirMove
 * Update file size after upload
 * Add Open seek
 * Set private permission for new folder and uploaded file
 * Add docs
 * Update List function
 * Fix UserSessionInfo struct
 * Fix socket leaks
 * Don’t close resp.Body in Open method
 * Get hash when listing files
This commit is contained in:
Jakub Karlicek 2017-07-17 07:36:45 +02:00 committed by Nick Craig-Wood
parent ec9894da07
commit 53292527bb
14 changed files with 835 additions and 189 deletions

View File

@ -28,6 +28,7 @@ Rclone is a command line program to sync files and directories to and from
* Mega * Mega
* Microsoft Azure Blob Storage * Microsoft Azure Blob Storage
* Microsoft OneDrive * Microsoft OneDrive
* OpenDrive
* Openstack Swift / Rackspace cloud files / Memset Memstore / OVH / Oracle Cloud Storage * Openstack Swift / Rackspace cloud files / Memset Memstore / OVH / Oracle Cloud Storage
* pCloud * pCloud
* QingStor * QingStor

View File

@ -18,6 +18,7 @@ import (
_ "github.com/ncw/rclone/backend/local" _ "github.com/ncw/rclone/backend/local"
_ "github.com/ncw/rclone/backend/mega" _ "github.com/ncw/rclone/backend/mega"
_ "github.com/ncw/rclone/backend/onedrive" _ "github.com/ncw/rclone/backend/onedrive"
_ "github.com/ncw/rclone/backend/opendrive"
_ "github.com/ncw/rclone/backend/pcloud" _ "github.com/ncw/rclone/backend/pcloud"
_ "github.com/ncw/rclone/backend/qingstor" _ "github.com/ncw/rclone/backend/qingstor"
_ "github.com/ncw/rclone/backend/s3" _ "github.com/ncw/rclone/backend/s3"

View File

@ -5,16 +5,22 @@ import (
"io" "io"
"mime/multipart" "mime/multipart"
"net/http" "net/http"
"net/url"
"path"
"strconv" "strconv"
"strings" "strings"
"time" "time"
"fmt" "fmt"
"github.com/ncw/rclone/dircache"
"github.com/ncw/rclone/fs" "github.com/ncw/rclone/fs"
"github.com/ncw/rclone/pacer" "github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/rest" "github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/dircache"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors" "github.com/pkg/errors"
) )
@ -31,7 +37,7 @@ const (
func init() { func init() {
fs.Register(&fs.RegInfo{ fs.Register(&fs.RegInfo{
Name: "opendrive", Name: "opendrive",
Description: "OpenDRIVE", Description: "OpenDrive",
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "username", Name: "username",
@ -47,6 +53,7 @@ func init() {
// Fs represents a remote b2 server // Fs represents a remote b2 server
type Fs struct { type Fs struct {
name string // name of this remote name string // name of this remote
root string // the path we are working on
features *fs.Features // optional features features *fs.Features // optional features
username string // account name username string // account name
password string // auth key0 password string // auth key0
@ -72,6 +79,14 @@ func parsePath(path string) (root string) {
return return
} }
// mimics url.PathEscape which only available from go 1.8
func pathEscape(path string) string {
u := url.URL{
Path: path,
}
return u.EscapedPath()
}
// ------------------------------------------------------------ // ------------------------------------------------------------
// Name of the remote (as passed into NewFs) // Name of the remote (as passed into NewFs)
@ -81,12 +96,12 @@ func (f *Fs) Name() string {
// Root of the remote (as passed into NewFs) // Root of the remote (as passed into NewFs)
func (f *Fs) Root() string { func (f *Fs) Root() string {
return "/" return f.root
} }
// String converts this Fs to a string // String converts this Fs to a string
func (f *Fs) String() string { func (f *Fs) String() string {
return "OpenDRIVE" return fmt.Sprintf("OpenDrive root '%s'", f.root)
} }
// Features returns the optional features of this Fs // Features returns the optional features of this Fs
@ -95,13 +110,8 @@ func (f *Fs) Features() *fs.Features {
} }
// Hashes returns the supported hash sets. // Hashes returns the supported hash sets.
func (f *Fs) Hashes() fs.HashSet { func (f *Fs) Hashes() hash.Set {
return fs.HashSet(fs.HashMD5) return hash.Set(hash.MD5)
}
// List walks the path returning iles and directories into out
func (f *Fs) List(out fs.ListOpts, dir string) {
f.dirCache.List(f, out, dir)
} }
// NewFs contstructs an Fs from the path, bucket:path // NewFs contstructs an Fs from the path, bucket:path
@ -112,7 +122,7 @@ func NewFs(name, root string) (fs.Fs, error) {
if username == "" { if username == "" {
return nil, errors.New("username not found") return nil, errors.New("username not found")
} }
password, err := fs.Reveal(fs.ConfigFileGet(name, "password")) password, err := obscure.Reveal(fs.ConfigFileGet(name, "password"))
if err != nil { if err != nil {
return nil, errors.New("password coudl not revealed") return nil, errors.New("password coudl not revealed")
} }
@ -120,14 +130,15 @@ func NewFs(name, root string) (fs.Fs, error) {
return nil, errors.New("password not found") return nil, errors.New("password not found")
} }
fs.Debugf(nil, "OpenDRIVE-user: %s", username) fs.Debugf(nil, "OpenDrive-user: %s", username)
fs.Debugf(nil, "OpenDRIVE-pass: %s", password) fs.Debugf(nil, "OpenDrive-pass: %s", password)
f := &Fs{ f := &Fs{
name: name, name: name,
username: username, username: username,
password: password, password: password,
srv: rest.NewClient(fs.Config.Client()).SetErrorHandler(errorHandler), root: root,
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant), pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
} }
@ -151,8 +162,8 @@ func NewFs(name, root string) (fs.Fs, error) {
if err != nil { if err != nil {
return nil, errors.Wrap(err, "failed to create session") return nil, errors.Wrap(err, "failed to create session")
} }
resp.Body.Close()
fs.Debugf(nil, "Starting OpenDRIVE session with ID: %s", f.session.SessionID) fs.Debugf(nil, "Starting OpenDrive session with ID: %s", f.session.SessionID)
f.features = (&fs.Features{ReadMimeType: true, WriteMimeType: true}).Fill(f) f.features = (&fs.Features{ReadMimeType: true, WriteMimeType: true}).Fill(f)
@ -163,6 +174,7 @@ func NewFs(name, root string) (fs.Fs, error) {
newRoot, remote := dircache.SplitPath(root) newRoot, remote := dircache.SplitPath(root)
newF := *f newF := *f
newF.dirCache = dircache.New(newRoot, "0", &newF) newF.dirCache = dircache.New(newRoot, "0", &newF)
newF.root = newRoot
// Make new Fs which is the parent // Make new Fs which is the parent
err = newF.dirCache.FindRoot(false) err = newF.dirCache.FindRoot(false)
@ -184,6 +196,14 @@ func NewFs(name, root string) (fs.Fs, error) {
return f, nil return f, nil
} }
// rootSlash returns root with a slash on if it is empty, otherwise empty string
func (f *Fs) rootSlash() string {
if f.root == "" {
return f.root
}
return f.root + "/"
}
// errorHandler parses a non 2xx error response into an error // errorHandler parses a non 2xx error response into an error
func errorHandler(resp *http.Response) error { func errorHandler(resp *http.Response) error {
// Decode error response // Decode error response
@ -205,7 +225,7 @@ func errorHandler(resp *http.Response) error {
return nil return nil
} }
// Mkdir creates the bucket if it doesn't exist // Mkdir creates the folder if it doesn't exist
func (f *Fs) Mkdir(dir string) error { func (f *Fs) Mkdir(dir string) error {
fs.Debugf(nil, "Mkdir(\"%s\")", dir) fs.Debugf(nil, "Mkdir(\"%s\")", dir)
err := f.dirCache.FindRoot(true) err := f.dirCache.FindRoot(true)
@ -218,42 +238,278 @@ func (f *Fs) Mkdir(dir string) error {
return err return err
} }
// Rmdir deletes the bucket if the fs is at the root // deleteObject removes an object by ID
func (f *Fs) deleteObject(id string) error {
return f.pacer.Call(func() (bool, error) {
removeDirData := removeFolder{SessionID: f.session.SessionID, FolderID: id}
opts := rest.Opts{
Method: "POST",
NoResponse: true,
Path: "/folder/remove.json",
}
resp, err := f.srv.CallJSON(&opts, &removeDirData, nil)
return f.shouldRetry(resp, err)
})
}
// purgeCheck remotes the root directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(dir string, check bool) error {
root := path.Join(f.root, dir)
if root == "" {
return errors.New("can't purge root directory")
}
dc := f.dirCache
err := dc.FindRoot(false)
if err != nil {
return err
}
rootID, err := dc.FindDir(dir, false)
if err != nil {
return err
}
item, err := f.readMetaDataForFolderID(rootID)
if err != nil {
return err
}
if check && len(item.Files) != 0 {
return errors.New("folder not empty")
}
err = f.deleteObject(rootID)
if err != nil {
return err
}
f.dirCache.FlushDir(dir)
if err != nil {
return err
}
return nil
}
// Rmdir deletes the root folder
// //
// Returns an error if it isn't empty // Returns an error if it isn't empty
func (f *Fs) Rmdir(dir string) error { func (f *Fs) Rmdir(dir string) error {
fs.Debugf(nil, "Rmdir(\"%s\")", dir) fs.Debugf(nil, "Rmdir(\"%s\")", path.Join(f.root, dir))
// if f.root != "" || dir != "" { return f.purgeCheck(dir, true)
// return nil
// }
// opts := rest.Opts{
// Method: "POST",
// Path: "/b2_delete_bucket",
// }
// bucketID, err := f.getBucketID()
// if err != nil {
// return err
// }
// var request = api.DeleteBucketRequest{
// ID: bucketID,
// AccountID: f.info.AccountID,
// }
// var response api.Bucket
// err = f.pacer.Call(func() (bool, error) {
// resp, err := f.srv.CallJSON(&opts, &request, &response)
// return f.shouldRetry(resp, err)
// })
// if err != nil {
// return errors.Wrap(err, "failed to delete bucket")
// }
// f.clearBucketID()
// f.clearUploadURL()
return nil
} }
// Precision of the remote // Precision of the remote
func (f *Fs) Precision() time.Duration { func (f *Fs) Precision() time.Duration {
return time.Millisecond return time.Second
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
fs.Debugf(nil, "Copy(%v)", remote)
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
err := srcObj.readMetaData()
if err != nil {
return nil, err
}
srcPath := srcObj.fs.rootSlash() + srcObj.remote
dstPath := f.rootSlash() + remote
if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
return nil, errors.Errorf("Can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
}
// Create temporary object
dstObj, _, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size)
if err != nil {
return nil, err
}
fs.Debugf(nil, "...%#v\n...%#v", remote, directoryID)
// Copy the object
var resp *http.Response
response := copyFileResponse{}
err = f.pacer.Call(func() (bool, error) {
copyFileData := copyFile{
SessionID: f.session.SessionID,
SrcFileID: srcObj.id,
DstFolderID: directoryID,
Move: "false",
OverwriteIfExists: "true",
}
opts := rest.Opts{
Method: "POST",
Path: "/file/move_copy.json",
}
resp, err = f.srv.CallJSON(&opts, &copyFileData, &response)
return f.shouldRetry(resp, err)
})
if err != nil {
return nil, err
}
resp.Body.Close()
size, _ := strconv.ParseInt(response.Size, 10, 64)
dstObj.id = response.FileID
dstObj.size = size
return dstObj, nil
}
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
fs.Debugf(nil, "Move(%v)", remote)
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantCopy
}
err := srcObj.readMetaData()
if err != nil {
return nil, err
}
// Create temporary object
dstObj, _, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size)
if err != nil {
return nil, err
}
// Copy the object
var resp *http.Response
response := copyFileResponse{}
err = f.pacer.Call(func() (bool, error) {
copyFileData := copyFile{
SessionID: f.session.SessionID,
SrcFileID: srcObj.id,
DstFolderID: directoryID,
Move: "true",
OverwriteIfExists: "true",
}
opts := rest.Opts{
Method: "POST",
Path: "/file/move_copy.json",
}
resp, err = f.srv.CallJSON(&opts, &copyFileData, &response)
return f.shouldRetry(resp, err)
})
if err != nil {
return nil, err
}
resp.Body.Close()
size, _ := strconv.ParseInt(response.Size, 10, 64)
dstObj.id = response.FileID
dstObj.size = size
return dstObj, nil
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {
fs.Debugf(nil, "DirMove(%v)", src.Root())
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(src, "DirMove error: not same remote type")
return fs.ErrorCantDirMove
}
srcPath := path.Join(srcFs.root, srcRemote)
// Refuse to move to or from the root
if srcPath == "" {
fs.Debugf(src, "DirMove error: Can't move root")
return errors.New("can't move root directory")
}
// find the root src directory
err = srcFs.dirCache.FindRoot(false)
if err != nil {
return err
}
// Find ID of src parent
srcDirectoryID, err := srcFs.dirCache.FindDir(srcRemote, false)
if err != nil {
return err
}
// Find ID of dst parent, creating subdirs if necessary
findPath := dstRemote
if dstRemote == "" {
findPath = f.root
}
dstDirectoryID, err := f.dirCache.FindDir(findPath, true)
if err != nil {
return err
}
// Check destination does not exist
if dstRemote != "" {
_, err = f.dirCache.FindDir(dstRemote, false)
if err == fs.ErrorDirNotFound {
// OK
} else if err != nil {
return err
} else {
return fs.ErrorDirExists
}
}
var resp *http.Response
response := moveFolderResponse{}
err = f.pacer.Call(func() (bool, error) {
moveFolderData := moveFolder{
SessionID: f.session.SessionID,
FolderID: srcDirectoryID,
DstFolderID: dstDirectoryID,
Move: "true",
}
opts := rest.Opts{
Method: "POST",
Path: "/folder/move_copy.json",
}
resp, err = f.srv.CallJSON(&opts, &moveFolderData, &response)
return f.shouldRetry(resp, err)
})
if err != nil {
fs.Debugf(src, "DirMove error %v", err)
return err
}
resp.Body.Close()
srcFs.dirCache.FlushDir(srcRemote)
return nil
}
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge() error {
return f.purgeCheck("", false)
} }
// Return an Object from a path // Return an Object from a path
@ -270,6 +526,7 @@ func (f *Fs) newObjectWithInfo(remote string, file *File) (fs.Object, error) {
id: file.FileID, id: file.FileID,
modTime: time.Unix(file.DateModified, 0), modTime: time.Unix(file.DateModified, 0),
size: file.Size, size: file.Size,
md5: file.FileHash,
} }
} else { } else {
o = &Object{ o = &Object{
@ -282,14 +539,13 @@ func (f *Fs) newObjectWithInfo(remote string, file *File) (fs.Object, error) {
return nil, err return nil, err
} }
} }
fs.Debugf(nil, "%v", o)
return o, nil return o, nil
} }
// NewObject finds the Object at remote. If it can't be found // NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound. // it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (fs.Object, error) { func (f *Fs) NewObject(remote string) (fs.Object, error) {
fs.Debugf(nil, "NewObject(\"%s\"", remote) fs.Debugf(nil, "NewObject(\"%s\")", remote)
return f.newObjectWithInfo(remote, nil) return f.newObjectWithInfo(remote, nil)
} }
@ -305,6 +561,7 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje
if err != nil { if err != nil {
return nil, leaf, directoryID, err return nil, leaf, directoryID, err
} }
fs.Debugf(nil, "\n...leaf %#v\n...id %#v", leaf, directoryID)
// Temporary Object under construction // Temporary Object under construction
o = &Object{ o = &Object{
fs: f, fs: f,
@ -313,6 +570,27 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje
return o, leaf, directoryID, nil return o, leaf, directoryID, nil
} }
// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForFolderID(id string) (info *FolderList, err error) {
var resp *http.Response
opts := rest.Opts{
Method: "GET",
Path: "/folder/list.json/" + f.session.SessionID + "/" + id,
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, nil, &info)
return f.shouldRetry(resp, err)
})
if err != nil {
return nil, err
}
if resp != nil {
resp.Body.Close()
}
return info, err
}
// Put the object into the bucket // Put the object into the bucket
// //
// Copy the reader in to the new object which is returned // Copy the reader in to the new object which is returned
@ -325,10 +603,36 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.
fs.Debugf(nil, "Put(%s)", remote) fs.Debugf(nil, "Put(%s)", remote)
o, _, _, err := f.createObject(remote, modTime, size) o, leaf, directoryID, err := f.createObject(remote, modTime, size)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if "" == o.id {
o.readMetaData()
}
if "" == o.id {
// We need to create a ID for this file
var resp *http.Response
response := createFileResponse{}
err := o.fs.pacer.Call(func() (bool, error) {
createFileData := createFile{SessionID: o.fs.session.SessionID, FolderID: directoryID, Name: replaceReservedChars(leaf)}
opts := rest.Opts{
Method: "POST",
Path: "/upload/create_file.json",
}
resp, err = o.fs.srv.CallJSON(&opts, &createFileData, &response)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to create file")
}
resp.Body.Close()
o.id = response.FileID
}
return o, o.Update(in, src, options...) return o, o.Update(in, src, options...)
} }
@ -347,43 +651,39 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err // shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience // deserve to be retried. It returns the err as a convenience
func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) { func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
// if resp != nil { return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
// if resp.StatusCode == 401 {
// f.tokenRenewer.Invalidate()
// fs.Debugf(f, "401 error received - invalidating token")
// return true, err
// }
// // Work around receiving this error sporadically on authentication
// //
// // HTTP code 403: "403 Forbidden", reponse body: {"message":"Authorization header requires 'Credential' parameter. Authorization header requires 'Signature' parameter. Authorization header requires 'SignedHeaders' parameter. Authorization header requires existence of either a 'X-Amz-Date' or a 'Date' header. Authorization=Bearer"}
// if resp.StatusCode == 403 && strings.Contains(err.Error(), "Authorization header requires") {
// fs.Debugf(f, "403 \"Authorization header requires...\" error received - retry")
// return true, err
// }
// }
return fs.ShouldRetry(err) || fs.ShouldRetryHTTP(resp, retryErrorCodes), err
} }
// DirCacher methos // DirCacher methods
// CreateDir makes a directory with pathID as parent and name leaf // CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) { func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
fs.Debugf(nil, "CreateDir(\"%s\", \"%s\")", pathID, leaf) fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, replaceReservedChars(leaf))
// //fmt.Printf("CreateDir(%q, %q)\n", pathID, leaf) var resp *http.Response
// folder := acd.FolderFromId(pathID, f.c.Nodes) response := createFolderResponse{}
// var resp *http.Response err = f.pacer.Call(func() (bool, error) {
// var info *acd.Folder createDirData := createFolder{
// err = f.pacer.Call(func() (bool, error) { SessionID: f.session.SessionID,
// info, resp, err = folder.CreateFolder(leaf) FolderName: replaceReservedChars(leaf),
// return f.shouldRetry(resp, err) FolderSubParent: pathID,
// }) FolderIsPublic: 0,
// if err != nil { FolderPublicUpl: 0,
// //fmt.Printf("...Error %v\n", err) FolderPublicDisplay: 0,
// return "", err FolderPublicDnl: 0,
// } }
// //fmt.Printf("...Id %q\n", *info.Id) opts := rest.Opts{
// return *info.Id, nil Method: "POST",
return "", fmt.Errorf("CreateDir not implemented") Path: "/folder.json",
}
resp, err = f.srv.CallJSON(&opts, &createDirData, &response)
return f.shouldRetry(resp, err)
})
if err != nil {
return "", err
}
resp.Body.Close()
return response.FolderID, nil
} }
// FindLeaf finds a directory of name leaf in the folder with ID pathID // FindLeaf finds a directory of name leaf in the folder with ID pathID
@ -391,7 +691,7 @@ func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err er
fs.Debugf(nil, "FindLeaf(\"%s\", \"%s\")", pathID, leaf) fs.Debugf(nil, "FindLeaf(\"%s\", \"%s\")", pathID, leaf)
if pathID == "0" && leaf == "" { if pathID == "0" && leaf == "" {
fs.Debugf(nil, "Found OpenDRIVE root") fs.Debugf(nil, "Found OpenDrive root")
// that's the root directory // that's the root directory
return pathID, true, nil return pathID, true, nil
} }
@ -410,8 +710,10 @@ func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err er
if err != nil { if err != nil {
return "", false, errors.Wrap(err, "failed to get folder list") return "", false, errors.Wrap(err, "failed to get folder list")
} }
resp.Body.Close()
for _, folder := range folderList.Folders { for _, folder := range folderList.Folders {
folder.Name = restoreReservedChars(folder.Name)
fs.Debugf(nil, "Folder: %s (%s)", folder.Name, folder.FolderID) fs.Debugf(nil, "Folder: %s (%s)", folder.Name, folder.FolderID)
if leaf == folder.Name { if leaf == folder.Name {
@ -423,55 +725,64 @@ func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err er
return "", false, nil return "", false, nil
} }
// ListDir reads the directory specified by the job into out, returning any more jobs // List the objects and directories in dir into entries. The
func (f *Fs) ListDir(out fs.ListOpts, job dircache.ListDirJob) (jobs []dircache.ListDirJob, err error) { // entries can be returned in any order but should be for a
fs.Debugf(nil, "ListDir(%v, %v)", out, job) // complete directory.
// get the folderIDs //
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
fs.Debugf(nil, "List(%v)", dir)
err = f.dirCache.FindRoot(false)
if err != nil {
return nil, err
}
directoryID, err := f.dirCache.FindDir(dir, false)
if err != nil {
return nil, err
}
var resp *http.Response var resp *http.Response
opts := rest.Opts{
Method: "GET",
Path: "/folder/list.json/" + f.session.SessionID + "/" + directoryID,
}
folderList := FolderList{} folderList := FolderList{}
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
opts := rest.Opts{
Method: "GET",
Path: "/folder/list.json/" + f.session.SessionID + "/" + job.DirID,
}
resp, err = f.srv.CallJSON(&opts, nil, &folderList) resp, err = f.srv.CallJSON(&opts, nil, &folderList)
return f.shouldRetry(resp, err) return f.shouldRetry(resp, err)
}) })
if err != nil { if err != nil {
return nil, errors.Wrap(err, "failed to get folder list") return nil, errors.Wrap(err, "failed to get folder list")
} }
resp.Body.Close()
for _, folder := range folderList.Folders { for _, folder := range folderList.Folders {
folder.Name = restoreReservedChars(folder.Name)
fs.Debugf(nil, "Folder: %s (%s)", folder.Name, folder.FolderID) fs.Debugf(nil, "Folder: %s (%s)", folder.Name, folder.FolderID)
remote := job.Path + folder.Name remote := path.Join(dir, folder.Name)
if out.IncludeDirectory(remote) { // cache the directory ID for later lookups
dir := &fs.Dir{ f.dirCache.Put(remote, folder.FolderID)
Name: remote, d := fs.NewDir(remote, time.Unix(int64(folder.DateModified), 0)).SetID(folder.FolderID)
Bytes: -1, d.SetItems(int64(folder.ChildFolders))
Count: -1, entries = append(entries, d)
}
dir.When = time.Unix(int64(folder.DateModified), 0)
if out.AddDir(dir) {
continue
}
if job.Depth > 0 {
jobs = append(jobs, dircache.ListDirJob{DirID: folder.FolderID, Path: remote + "/", Depth: job.Depth - 1})
}
}
} }
for _, file := range folderList.Files { for _, file := range folderList.Files {
file.Name = restoreReservedChars(file.Name)
fs.Debugf(nil, "File: %s (%s)", file.Name, file.FileID) fs.Debugf(nil, "File: %s (%s)", file.Name, file.FileID)
remote := job.Path + file.Name remote := path.Join(dir, file.Name)
o, err := f.newObjectWithInfo(remote, &file) o, err := f.newObjectWithInfo(remote, &file)
if err != nil { if err != nil {
out.SetError(err) return nil, err
continue
} }
out.Add(o) entries = append(entries, o)
} }
return jobs, nil return entries, nil
} }
// ------------------------------------------------------------ // ------------------------------------------------------------
@ -495,9 +806,9 @@ func (o *Object) Remote() string {
} }
// Hash returns the Md5sum of an object returning a lowercase hex string // Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(t fs.HashType) (string, error) { func (o *Object) Hash(t hash.Type) (string, error) {
if t != fs.HashMD5 { if t != hash.MD5 {
return "", fs.ErrHashUnsupported return "", hash.ErrUnsupported
} }
return o.md5, nil return o.md5, nil
} }
@ -518,20 +829,42 @@ func (o *Object) ModTime() time.Time {
// SetModTime sets the modification time of the local fs object // SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) error { func (o *Object) SetModTime(modTime time.Time) error {
// FIXME not implemented fs.Debugf(nil, "SetModTime(%v)", modTime.String())
return fs.ErrorCantSetModTime opts := rest.Opts{
Method: "PUT",
NoResponse: true,
Path: "/file/filesettings.json",
}
update := modTimeFile{SessionID: o.fs.session.SessionID, FileID: o.id, FileModificationTime: strconv.FormatInt(modTime.Unix(), 10)}
err := o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(&opts, &update, nil)
return o.fs.shouldRetry(resp, err)
})
o.modTime = modTime
return err
} }
// Open an object for read // Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
fs.Debugf(nil, "Open(\"%v\")", o.remote) fs.Debugf(nil, "Open(\"%v\")", o.remote)
opts := fs.OpenOptionHeaders(options)
offset := "0"
if "" != opts["Range"] {
parts := strings.Split(opts["Range"], "=")
parts = strings.Split(parts[1], "-")
offset = parts[0]
}
// get the folderIDs // get the folderIDs
var resp *http.Response var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
opts := rest.Opts{ opts := rest.Opts{
Method: "GET", Method: "GET",
Path: "/download/file.json/" + o.id + "?session_id=" + o.fs.session.SessionID, Path: "/download/file.json/" + o.id + "?session_id=" + o.fs.session.SessionID + "&offset=" + offset,
} }
resp, err = o.fs.srv.Call(&opts) resp, err = o.fs.srv.Call(&opts)
return o.fs.shouldRetry(resp, err) return o.fs.shouldRetry(resp, err)
@ -546,7 +879,15 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
// Remove an object // Remove an object
func (o *Object) Remove() error { func (o *Object) Remove() error {
fs.Debugf(nil, "Remove(\"%s\")", o.id) fs.Debugf(nil, "Remove(\"%s\")", o.id)
return fmt.Errorf("Remove not implemented") return o.fs.pacer.Call(func() (bool, error) {
opts := rest.Opts{
Method: "DELETE",
NoResponse: true,
Path: "/file.json/" + o.fs.session.SessionID + "/" + o.id,
}
resp, err := o.fs.srv.Call(&opts)
return o.fs.shouldRetry(resp, err)
})
} }
// Storable returns a boolean showing whether this object storable // Storable returns a boolean showing whether this object storable
@ -560,48 +901,26 @@ func (o *Object) Storable() bool {
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
size := src.Size() size := src.Size()
modTime := src.ModTime() modTime := src.ModTime()
fs.Debugf(nil, "%d %d", size, modTime)
fs.Debugf(nil, "Update(\"%s\", \"%s\")", o.id, o.remote) fs.Debugf(nil, "Update(\"%s\", \"%s\")", o.id, o.remote)
var err error
if "" == o.id {
// We need to create a ID for this file
var resp *http.Response
response := createFileResponse{}
err = o.fs.pacer.Call(func() (bool, error) {
createFileData := createFile{SessionID: o.fs.session.SessionID, FolderID: "0", Name: o.remote}
opts := rest.Opts{
Method: "POST",
Path: "/upload/create_file.json",
}
resp, err = o.fs.srv.CallJSON(&opts, &createFileData, &response)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "failed to create file")
}
o.id = response.FileID
}
fmt.Println(o.id)
// Open file for upload // Open file for upload
var resp *http.Response var resp *http.Response
openResponse := openUploadResponse{} openResponse := openUploadResponse{}
err = o.fs.pacer.Call(func() (bool, error) { err := o.fs.pacer.Call(func() (bool, error) {
openUploadData := openUpload{SessionID: o.fs.session.SessionID, FileID: o.id, Size: size} openUploadData := openUpload{SessionID: o.fs.session.SessionID, FileID: o.id, Size: size}
fs.Debugf(nil, "PreOpen: %s", openUploadData) fs.Debugf(nil, "PreOpen: %#v", openUploadData)
opts := rest.Opts{ opts := rest.Opts{
Method: "POST", Method: "POST",
Path: "/upload/open_file_upload.json", Path: "/upload/open_file_upload.json",
} }
resp, err = o.fs.srv.CallJSON(&opts, &openUploadData, &openResponse) resp, err := o.fs.srv.CallJSON(&opts, &openUploadData, &openResponse)
return o.fs.shouldRetry(resp, err) return o.fs.shouldRetry(resp, err)
}) })
if err != nil { if err != nil {
return errors.Wrap(err, "failed to create file") return errors.Wrap(err, "failed to create file")
} }
fs.Debugf(nil, "PostOpen: %s", openResponse) // resp.Body.Close()
fs.Debugf(nil, "PostOpen: %#v", openResponse)
// 1 MB chunks size // 1 MB chunks size
chunkSize := int64(1024 * 1024 * 10) chunkSize := int64(1024 * 1024 * 10)
@ -685,19 +1004,17 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
if err != nil { if err != nil {
return errors.Wrap(err, "failed to create file") return errors.Wrap(err, "failed to create file")
} }
fmt.Println(resp.Body)
resp.Body.Close() resp.Body.Close()
chunkCounter++ chunkCounter++
chunkOffset += currentChunkSize chunkOffset += currentChunkSize
} }
// CLose file for upload // Close file for upload
closeResponse := closeUploadResponse{} closeResponse := closeUploadResponse{}
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
closeUploadData := closeUpload{SessionID: o.fs.session.SessionID, FileID: o.id, Size: size, TempLocation: openResponse.TempLocation} closeUploadData := closeUpload{SessionID: o.fs.session.SessionID, FileID: o.id, Size: size, TempLocation: openResponse.TempLocation}
fs.Debugf(nil, "PreClose: %s", closeUploadData) fs.Debugf(nil, "PreClose: %#v", closeUploadData)
opts := rest.Opts{ opts := rest.Opts{
Method: "POST", Method: "POST",
Path: "/upload/close_file_upload.json", Path: "/upload/close_file_upload.json",
@ -708,29 +1025,33 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
if err != nil { if err != nil {
return errors.Wrap(err, "failed to create file") return errors.Wrap(err, "failed to create file")
} }
fs.Debugf(nil, "PostClose: %s", closeResponse) resp.Body.Close()
fs.Debugf(nil, "PostClose: %#v", closeResponse)
// file := acd.File{Node: o.info} o.id = closeResponse.FileID
// var info *acd.File o.size = closeResponse.Size
// var resp *http.Response
// var err error // Set the mod time now and read metadata
// err = o.fs.pacer.CallNoRetry(func() (bool, error) { err = o.SetModTime(modTime)
// start := time.Now() if err != nil {
// o.fs.tokenRenewer.Start() return err
// info, resp, err = file.Overwrite(in) }
// o.fs.tokenRenewer.Stop()
// var ok bool // Set permissions
// ok, info, err = o.fs.checkUpload(resp, in, src, info, err, time.Since(start)) err = o.fs.pacer.Call(func() (bool, error) {
// if ok { update := permissions{SessionID: o.fs.session.SessionID, FileID: o.id, FileIsPublic: 0}
// return false, nil fs.Debugf(nil, "Permissions : %#v", update)
// } opts := rest.Opts{
// return o.fs.shouldRetry(resp, err) Method: "POST",
// }) NoResponse: true,
// if err != nil { Path: "/file/access.json",
// return err }
// } resp, err = o.fs.srv.CallJSON(&opts, &update, nil)
// o.info = info.Node return o.fs.shouldRetry(resp, err)
// return nil })
if err != nil {
return err
}
return nil return nil
} }
@ -748,7 +1069,7 @@ func (o *Object) readMetaData() (err error) {
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
opts := rest.Opts{ opts := rest.Opts{
Method: "GET", Method: "GET",
Path: "/folder/itembyname.json/" + o.fs.session.SessionID + "/" + directoryID + "?name=" + leaf, Path: "/folder/itembyname.json/" + o.fs.session.SessionID + "/" + directoryID + "?name=" + pathEscape(replaceReservedChars(leaf)),
} }
resp, err = o.fs.srv.CallJSON(&opts, nil, &folderList) resp, err = o.fs.srv.CallJSON(&opts, nil, &folderList)
return o.fs.shouldRetry(resp, err) return o.fs.shouldRetry(resp, err)
@ -756,6 +1077,7 @@ func (o *Object) readMetaData() (err error) {
if err != nil { if err != nil {
return errors.Wrap(err, "failed to get folder list") return errors.Wrap(err, "failed to get folder list")
} }
resp.Body.Close()
if len(folderList.Files) == 0 { if len(folderList.Files) == 0 {
return fs.ErrorObjectNotFound return fs.ErrorObjectNotFound
@ -764,7 +1086,7 @@ func (o *Object) readMetaData() (err error) {
leafFile := folderList.Files[0] leafFile := folderList.Files[0]
o.id = leafFile.FileID o.id = leafFile.FileID
o.modTime = time.Unix(leafFile.DateModified, 0) o.modTime = time.Unix(leafFile.DateModified, 0)
o.md5 = "" o.md5 = leafFile.FileHash
o.size = leafFile.Size o.size = leafFile.Size
return nil return nil

View File

@ -0,0 +1,17 @@
// Test Opendrive filesystem interface
package opendrive_test
import (
"testing"
"github.com/ncw/rclone/backend/opendrive"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs the standard rclone integration test suite
// against the OpenDrive remote named "TestOpenDrive:".
func TestIntegration(t *testing.T) {
	opt := fstests.Opt{
		RemoteName: "TestOpenDrive:",
		NilObject:  (*opendrive.Object)(nil),
	}
	fstests.Run(t, &opt)
}

View File

@ -0,0 +1,84 @@
/*
Translate file names for OpenDrive
OpenDrive reserved characters
The following characters are OpenDrive reserved characters, and can't
be used in OpenDrive folder and file names.
\ / : * ? " < > |
*/
package opendrive
import (
"regexp"
"strings"
)
// charMap holds replacements for characters
//
// OpenDrive has a restricted set of characters compared to other
// cloud storage systems, so we map these to the FULLWIDTH unicode
// equivalents
//
// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
var (
	// charMap maps each reserved character to the FULLWIDTH unicode
	// rune that stands in for it in OpenDrive names.
	charMap = map[rune]rune{
		'\\': '＼', // FULLWIDTH REVERSE SOLIDUS
		'*':  '＊', // FULLWIDTH ASTERISK
		'<':  '＜', // FULLWIDTH LESS-THAN SIGN
		'>':  '＞', // FULLWIDTH GREATER-THAN SIGN
		'?':  '？', // FULLWIDTH QUESTION MARK
		':':  '：', // FULLWIDTH COLON
		'|':  '｜', // FULLWIDTH VERTICAL LINE
		'#':  '＃', // FULLWIDTH NUMBER SIGN
		'%':  '％', // FULLWIDTH PERCENT SIGN
		'"':  '＂', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
		'.':  '．', // FULLWIDTH FULL STOP
		'~':  '～', // FULLWIDTH TILDE
		' ':  '␠', // SYMBOL FOR SPACE
	}
	// invCharMap is the inverse of charMap, populated in init
	invCharMap map[rune]rune
	// Positional rules: a name segment can't end in '.', nor start
	// with '~' or ' ' — these are only replaced in those positions.
	fixEndingInPeriod    = regexp.MustCompile(`\.(/|$)`)
	fixStartingWithTilde = regexp.MustCompile(`(/|^)~`)
	fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
)
// init builds invCharMap as the exact inverse of charMap so that
// substitutions can be undone.
func init() {
	invCharMap = make(map[rune]rune, len(charMap))
	for original, substitute := range charMap {
		invCharMap[substitute] = original
	}
}
// replaceReservedChars takes a path and substitutes any reserved
// characters in it with their unicode stand-ins from charMap.
func replaceReservedChars(in string) string {
	// Positional substitutions first: folder names can't end with a
	// period '.', and names can't begin with a tilde '~' or a space.
	in = fixEndingInPeriod.ReplaceAllString(in, string(charMap['.'])+"$1")
	in = fixStartingWithTilde.ReplaceAllString(in, "$1"+string(charMap['~']))
	in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
	// Then swap every remaining reserved character for its substitute.
	mapper := func(c rune) rune {
		switch c {
		case '.', '~', ' ':
			// Only reserved in the positions handled above.
			return c
		}
		if substitute, ok := charMap[c]; ok {
			return substitute
		}
		return c
	}
	return strings.Map(mapper, in)
}
// restoreReservedChars takes a path and undoes any substitutions
// made by replaceReservedChars
func restoreReservedChars(in string) string {
return strings.Map(func(c rune) rune {
if replacement, ok := invCharMap[c]; ok {
return replacement
}
return c
}, in)
}

View File

@ -0,0 +1,30 @@
package opendrive
import "testing"
// TestReplace checks that replaceReservedChars substitutes every
// OpenDrive reserved character (and the positional '.', '~' and ' '
// rules) and that restoreReservedChars is its exact inverse.
func TestReplace(t *testing.T) {
	for _, test := range []struct {
		in  string
		out string
	}{
		{"", ""},
		{"abc 123", "abc 123"},
		// '.' and '~' are only replaced at segment end/start, so the
		// trailing ".~" survives unchanged here.
		{`\*<>?:|#%".~`, `＼＊＜＞？：｜＃％＂.~`},
		{`\*<>?:|#%".~/\*<>?:|#%".~`, `＼＊＜＞？：｜＃％＂.~/＼＊＜＞？：｜＃％＂.~`},
		{" leading space", "␠leading space"},
		{"~leading tilde", "～leading tilde"},
		{"trailing dot.", "trailing dot．"},
		{" leading space/ leading space/ leading space", "␠leading space/␠leading space/␠leading space"},
		{"~leading tilde/~leading tilde/~leading tilde", "～leading tilde/～leading tilde/～leading tilde"},
		{"trailing dot./trailing dot./trailing dot.", "trailing dot．/trailing dot．/trailing dot．"},
	} {
		got := replaceReservedChars(test.in)
		if got != test.out {
			t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
		}
		got2 := restoreReservedChars(got)
		if got2 != test.in {
			t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
		}
	}
}

View File

@ -1,5 +1,9 @@
package opendrive package opendrive
import (
"encoding/json"
)
// Account describes a OpenDRIVE account // Account describes a OpenDRIVE account
type Account struct { type Account struct {
Username string `json:"username"` Username string `json:"username"`
@ -11,20 +15,20 @@ type UserSessionInfo struct {
Username string `json:"username"` Username string `json:"username"`
Password string `json:"passwd"` Password string `json:"passwd"`
SessionID string `json:"SessionID"` SessionID string `json:"SessionID"`
UserName string `json:"UserName"` UserName string `json:"UserName"`
UserFirstName string `json:"UserFirstName"` UserFirstName string `json:"UserFirstName"`
UserLastName string `json:"UserLastName"` UserLastName string `json:"UserLastName"`
AccType string `json:"AccType"` AccType string `json:"AccType"`
UserLang string `json:"UserLang"` UserLang string `json:"UserLang"`
UserID string `json:"UserID"` UserID string `json:"UserID"`
IsAccountUser int `json:"IsAccountUser"` IsAccountUser json.RawMessage `json:"IsAccountUser"`
DriveName string `json:"DriveName"` DriveName string `json:"DriveName"`
UserLevel string `json:"UserLevel"` UserLevel string `json:"UserLevel"`
UserPlan string `json:"UserPlan"` UserPlan string `json:"UserPlan"`
FVersioning string `json:"FVersioning"` FVersioning string `json:"FVersioning"`
UserDomain string `json:"UserDomain"` UserDomain string `json:"UserDomain"`
PartnerUsersDomain string `json:"PartnerUsersDomain"` PartnerUsersDomain string `json:"PartnerUsersDomain"`
} }
// FolderList describes a OpenDRIVE listing // FolderList describes a OpenDRIVE listing
@ -52,9 +56,48 @@ type Folder struct {
Encrypted string `json:"Encrypted"` Encrypted string `json:"Encrypted"`
} }
// createFolder is the JSON request body sent to create a new folder
type createFolder struct {
	SessionID           string `json:"session_id"`
	FolderName          string `json:"folder_name"`
	FolderSubParent     string `json:"folder_sub_parent"`
	FolderIsPublic      int64  `json:"folder_is_public"`      // (0 = private, 1 = public, 2 = hidden)
	FolderPublicUpl     int64  `json:"folder_public_upl"`     // (0 = disabled, 1 = enabled)
	FolderPublicDisplay int64  `json:"folder_public_display"` // (0 = disabled, 1 = enabled)
	FolderPublicDnl     int64  `json:"folder_public_dnl"`     // (0 = disabled, 1 = enabled).
}
// createFolderResponse is the JSON response to a createFolder request
type createFolderResponse struct {
	FolderID      string `json:"FolderID"`
	Name          string `json:"Name"`
	DateCreated   int    `json:"DateCreated"`
	DirUpdateTime int    `json:"DirUpdateTime"`
	Access        int    `json:"Access"`
	DateModified  int    `json:"DateModified"`
	Shared        string `json:"Shared"`
	Description   string `json:"Description"`
	Link          string `json:"Link"`
}
// moveFolder is the JSON request body to move (or copy) a folder to
// another parent folder
type moveFolder struct {
	SessionID   string `json:"session_id"`
	FolderID    string `json:"folder_id"`
	DstFolderID string `json:"dst_folder_id"`
	Move        string `json:"move"` // sent as a string — presumably "true"/"false"; confirm against caller
}
// moveFolderResponse is the JSON response to a moveFolder request
type moveFolderResponse struct {
	FolderID string `json:"FolderID"`
}
// removeFolder is the JSON request body to delete a folder
type removeFolder struct {
	SessionID string `json:"session_id"`
	FolderID  string `json:"folder_id"`
}
// File describes a OpenDRIVE file // File describes a OpenDRIVE file
type File struct { type File struct {
FileID string `json:"FileId"` FileID string `json:"FileId"`
FileHash string `json:"FileHash"`
Name string `json:"Name"` Name string `json:"Name"`
GroupID int `json:"GroupID"` GroupID int `json:"GroupID"`
Extension string `json:"Extension"` Extension string `json:"Extension"`
@ -74,6 +117,19 @@ type File struct {
EditOnline int `json:"EditOnline"` EditOnline int `json:"EditOnline"`
} }
// copyFile is the JSON request body to copy or move a file into a
// destination folder
type copyFile struct {
	SessionID   string `json:"session_id"`
	SrcFileID   string `json:"src_file_id"`
	DstFolderID string `json:"dst_folder_id"`
	Move        string `json:"move"`                // sent as a string — presumably "true"/"false"; confirm against caller
	OverwriteIfExists string `json:"overwrite_if_exists"`
}
// copyFileResponse is the JSON response to a copyFile request.
// Note Size comes back as a string, not a number.
type copyFileResponse struct {
	FileID string `json:"FileID"`
	Size   string `json:"Size"`
}
type createFile struct { type createFile struct {
SessionID string `json:"session_id"` SessionID string `json:"session_id"`
FolderID string `json:"folder_id"` FolderID string `json:"folder_id"`
@ -102,6 +158,12 @@ type createFileResponse struct {
RequireHashOnly int `json:"RequireHashOnly"` RequireHashOnly int `json:"RequireHashOnly"`
} }
// modTimeFile is the JSON request body to set a file's modification time
type modTimeFile struct {
	SessionID            string `json:"session_id"`
	FileID               string `json:"file_id"`
	FileModificationTime string `json:"file_modification_time"` // sent as a string — presumably a unix timestamp; confirm against SetModTime
}
type openUpload struct { type openUpload struct {
SessionID string `json:"session_id"` SessionID string `json:"session_id"`
FileID string `json:"file_id"` FileID string `json:"file_id"`
@ -124,6 +186,14 @@ type closeUpload struct {
} }
type closeUploadResponse struct { type closeUploadResponse struct {
FileID string `json:"FileID"`
FileHash string `json:"FileHash"` FileHash string `json:"FileHash"`
Size int64 `json:"Size"` Size int64 `json:"Size"`
} }
// permissions is the JSON request body to set the access level of a
// file (0 is used elsewhere in this backend to make files private)
type permissions struct {
	SessionID    string `json:"session_id"`
	FileID       string `json:"file_id"`
	FileIsPublic int64  `json:"file_ispublic"`
}

View File

@ -38,6 +38,7 @@ docs = [
"mega.md", "mega.md",
"azureblob.md", "azureblob.md",
"onedrive.md", "onedrive.md",
"opendrive.md",
"qingstor.md", "qingstor.md",
"swift.md", "swift.md",
"pcloud.md", "pcloud.md",

View File

@ -85,6 +85,7 @@ from various cloud storage systems and using file transfer services, such as:
* Mega * Mega
* Microsoft Azure Blob Storage * Microsoft Azure Blob Storage
* Microsoft OneDrive * Microsoft OneDrive
* OpenDrive
* Openstack Swift / Rackspace cloud files / Memset Memstore * Openstack Swift / Rackspace cloud files / Memset Memstore
* pCloud * pCloud
* QingStor * QingStor

View File

@ -34,6 +34,7 @@ Rclone is a command line program to sync files and directories to and from:
* {{< provider name="Minio" home="https://www.minio.io/" config="/s3/#minio" >}} * {{< provider name="Minio" home="https://www.minio.io/" config="/s3/#minio" >}}
* {{< provider name="Nextloud" home="https://nextcloud.com/" config="/webdav/#nextcloud" >}} * {{< provider name="Nextloud" home="https://nextcloud.com/" config="/webdav/#nextcloud" >}}
* {{< provider name="OVH" home="https://www.ovh.co.uk/public-cloud/storage/object-storage/" config="/swift/" >}} * {{< provider name="OVH" home="https://www.ovh.co.uk/public-cloud/storage/object-storage/" config="/swift/" >}}
* {{< provider name="OpenDrive" home="https://www.opendrive.com/" config="/opendrive/" >}}
* {{< provider name="Openstack Swift" home="https://docs.openstack.org/swift/latest/" config="/swift/" >}} * {{< provider name="Openstack Swift" home="https://docs.openstack.org/swift/latest/" config="/swift/" >}}
* {{< provider name="Oracle Cloud Storage" home="https://cloud.oracle.com/storage-opc" config="/swift/" >}} * {{< provider name="Oracle Cloud Storage" home="https://cloud.oracle.com/storage-opc" config="/swift/" >}}
* {{< provider name="ownCloud" home="https://owncloud.org/" config="/webdav/#owncloud" >}} * {{< provider name="ownCloud" home="https://owncloud.org/" config="/webdav/#owncloud" >}}

View File

@ -37,6 +37,7 @@ See the following for detailed instructions for
* [Microsoft Azure Blob Storage](/azureblob/) * [Microsoft Azure Blob Storage](/azureblob/)
* [Microsoft OneDrive](/onedrive/) * [Microsoft OneDrive](/onedrive/)
* [Openstack Swift / Rackspace Cloudfiles / Memset Memstore](/swift/) * [Openstack Swift / Rackspace Cloudfiles / Memset Memstore](/swift/)
* [OpenDrive](/opendrive/)
* [Pcloud](/pcloud/) * [Pcloud](/pcloud/)
* [QingStor](/qingstor/) * [QingStor](/qingstor/)
* [SFTP](/sftp/) * [SFTP](/sftp/)

114
docs/content/opendrive.md Normal file
View File

@ -0,0 +1,114 @@
---
title: "OpenDrive"
description: "Rclone docs for OpenDrive"
date: "2017-08-07"
---
<i class="fa fa-file"></i> OpenDrive
------------------------------------
Paths are specified as `remote:path`
Paths may be as deep as required, eg `remote:directory/subdirectory`.
Here is an example of how to make a remote called `remote`. First run:
rclone config
This will guide you through an interactive setup process:
```
n) New remote
d) Delete remote
q) Quit config
e/n/d/q> n
name> remote
Type of storage to configure.
Choose a number from below, or type in your own value
1 / Amazon Drive
\ "amazon cloud drive"
2 / Amazon S3 (also Dreamhost, Ceph, Minio)
\ "s3"
3 / Backblaze B2
\ "b2"
4 / Dropbox
\ "dropbox"
5 / Encrypt/Decrypt a remote
\ "crypt"
6 / Google Cloud Storage (this is not Google Drive)
\ "google cloud storage"
7 / Google Drive
\ "drive"
8 / Hubic
\ "hubic"
9 / Local Disk
\ "local"
10 / OpenDrive
\ "opendrive"
11 / Microsoft OneDrive
\ "onedrive"
12 / Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
\ "swift"
13 / SSH/SFTP Connection
\ "sftp"
14 / Yandex Disk
\ "yandex"
Storage> 10
Username
username>
Password
y) Yes type in my own password
g) Generate random password
y/g> y
Enter the password:
password:
Confirm the password:
password:
--------------------
[remote]
username =
password = *** ENCRYPTED ***
--------------------
y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d> y
```
List directories in top level of your OpenDrive
rclone lsd remote:
List all the files in your OpenDrive
rclone ls remote:
To copy a local directory to an OpenDrive directory called backup
rclone copy /home/source remote:backup
### Modified time and MD5SUMs ###
OpenDrive allows modification times to be set on objects accurate to 1
second. These will be used to detect whether objects need syncing or
not.
### Deleting files ###
Any files you delete with rclone will end up in the trash. OpenDrive
doesn't appear to provide an API to permanently delete files or to
empty the trash, so you will have to do that via the OpenDrive
website.
### Limitations ###
Note that OpenDrive is case insensitive so you can't have a
file called "Hello.doc" and one called "hello.doc".
There are quite a few characters that can't be in OpenDrive file
names. These can't occur on Windows platforms, but on non-Windows
platforms they are common. Rclone will map these names to and from an
identical looking unicode equivalent. For example if a file has a `?`
in it, it will be mapped to `？` instead.

View File

@ -30,6 +30,7 @@ Here is an overview of the major features of each cloud storage system.
| Mega | - | No | No | Yes | - | | Mega | - | No | No | Yes | - |
| Microsoft Azure Blob Storage | MD5 | Yes | No | No | R/W | | Microsoft Azure Blob Storage | MD5 | Yes | No | No | R/W |
| Microsoft OneDrive | SHA1 ‡‡ | Yes | Yes | No | R | | Microsoft OneDrive | SHA1 ‡‡ | Yes | Yes | No | R |
| OpenDrive | - | Yes | Yes | No | - |
| Openstack Swift | MD5 | Yes | No | No | R/W | | Openstack Swift | MD5 | Yes | No | No | R/W |
| pCloud | MD5, SHA1 | Yes | No | No | W | | pCloud | MD5, SHA1 | Yes | No | No | W |
| QingStor | MD5 | No | No | No | R/W | | QingStor | MD5 | No | No | No | R/W |
@ -139,6 +140,7 @@ operations more efficient.
| Mega | Yes | No | Yes | Yes | No | No | No | No [#2178](https://github.com/ncw/rclone/issues/2178) | Yes | | Mega | Yes | No | Yes | Yes | No | No | No | No [#2178](https://github.com/ncw/rclone/issues/2178) | Yes |
| Microsoft Azure Blob Storage | Yes | Yes | No | No | No | Yes | No | No [#2178](https://github.com/ncw/rclone/issues/2178) | No | | Microsoft Azure Blob Storage | Yes | Yes | No | No | No | Yes | No | No [#2178](https://github.com/ncw/rclone/issues/2178) | No |
| Microsoft OneDrive | Yes | Yes | Yes | No [#197](https://github.com/ncw/rclone/issues/197) | No [#575](https://github.com/ncw/rclone/issues/575) | No | No | No [#2178](https://github.com/ncw/rclone/issues/2178) | Yes | | Microsoft OneDrive | Yes | Yes | Yes | No [#197](https://github.com/ncw/rclone/issues/197) | No [#575](https://github.com/ncw/rclone/issues/575) | No | No | No [#2178](https://github.com/ncw/rclone/issues/2178) | Yes |
| OpenDrive | Yes | No | No | No | No | No | No | No | No |
| Openstack Swift | Yes † | Yes | No | No | No | Yes | Yes | No [#2178](https://github.com/ncw/rclone/issues/2178) | Yes | | Openstack Swift | Yes † | Yes | No | No | No | Yes | Yes | No [#2178](https://github.com/ncw/rclone/issues/2178) | Yes |
| pCloud | Yes | Yes | Yes | Yes | Yes | No | No | No [#2178](https://github.com/ncw/rclone/issues/2178) | Yes | | pCloud | Yes | Yes | Yes | Yes | Yes | No | No | No [#2178](https://github.com/ncw/rclone/issues/2178) | Yes |
| QingStor | No | Yes | No | No | No | Yes | No | No [#2178](https://github.com/ncw/rclone/issues/2178) | No | | QingStor | No | Yes | No | No | No | Yes | No | No [#2178](https://github.com/ncw/rclone/issues/2178) | No |

View File

@ -69,6 +69,7 @@
<li><a href="/mega/"><i class="fa fa-archive"></i> Mega</a></li> <li><a href="/mega/"><i class="fa fa-archive"></i> Mega</a></li>
<li><a href="/azureblob/"><i class="fa fa-windows"></i> Microsoft Azure Blob Storage</a></li> <li><a href="/azureblob/"><i class="fa fa-windows"></i> Microsoft Azure Blob Storage</a></li>
<li><a href="/onedrive/"><i class="fa fa-windows"></i> Microsoft OneDrive</a></li> <li><a href="/onedrive/"><i class="fa fa-windows"></i> Microsoft OneDrive</a></li>
<li><a href="/opendrive/"><i class="fa fa-space-shuttle"></i> OpenDrive</a></li>
<li><a href="/qingstor/"><i class="fa fa-hdd-o"></i> QingStor</a></li> <li><a href="/qingstor/"><i class="fa fa-hdd-o"></i> QingStor</a></li>
<li><a href="/swift/"><i class="fa fa-space-shuttle"></i> Openstack Swift</a></li> <li><a href="/swift/"><i class="fa fa-space-shuttle"></i> Openstack Swift</a></li>
<li><a href="/pcloud/"><i class="fa fa-cloud"></i> pCloud</a></li> <li><a href="/pcloud/"><i class="fa fa-cloud"></i> pCloud</a></li>