From a1263e70cfedc02c86334c60a82a6ef293d10c5b Mon Sep 17 00:00:00 2001 From: Nick Craig-Wood Date: Sat, 6 Jul 2019 00:16:57 +0100 Subject: [PATCH] premiumizeme: new backend for premiumize.me - Fixes #3063 --- README.md | 1 + backend/all/all.go | 1 + backend/premiumizeme/api/types.go | 83 ++ backend/premiumizeme/premiumizeme.go | 1217 +++++++++++++++++++++ backend/premiumizeme/premiumizeme_test.go | 17 + bin/make_manual.py | 1 + docs/content/about.md | 1 + docs/content/docs.md | 1 + docs/content/overview.md | 2 + docs/content/premiumizeme.md | 118 ++ docs/layouts/chrome/navbar.html | 1 + fstest/test_all/config.yaml | 4 + 12 files changed, 1447 insertions(+) create mode 100644 backend/premiumizeme/api/types.go create mode 100644 backend/premiumizeme/premiumizeme.go create mode 100644 backend/premiumizeme/premiumizeme_test.go create mode 100644 docs/content/premiumizeme.md diff --git a/README.md b/README.md index 56ab62bb9..1d1cc198a 100644 --- a/README.md +++ b/README.md @@ -52,6 +52,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and * Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/) * ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud) * pCloud [:page_facing_up:](https://rclone.org/pcloud/) + * premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/) * put.io [:page_facing_up:](https://rclone.org/webdav/#put-io) * QingStor [:page_facing_up:](https://rclone.org/qingstor/) * Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/) diff --git a/backend/all/all.go b/backend/all/all.go index 4373ac20f..2a5951579 100644 --- a/backend/all/all.go +++ b/backend/all/all.go @@ -24,6 +24,7 @@ import ( _ "github.com/rclone/rclone/backend/onedrive" _ "github.com/rclone/rclone/backend/opendrive" _ "github.com/rclone/rclone/backend/pcloud" + _ "github.com/rclone/rclone/backend/premiumizeme" _ "github.com/rclone/rclone/backend/qingstor" _ "github.com/rclone/rclone/backend/s3" _ 
"github.com/rclone/rclone/backend/sftp" diff --git a/backend/premiumizeme/api/types.go b/backend/premiumizeme/api/types.go new file mode 100644 index 000000000..f5a8cce6c --- /dev/null +++ b/backend/premiumizeme/api/types.go @@ -0,0 +1,83 @@ +// Package api contains definitions for using the premiumize.me API +package api + +import "fmt" + +// Response is returned by all messages and embedded in the +// structures below +type Response struct { + Message string `json:"message,omitempty"` + Status string `json:"status"` +} + +// Error statisfies the error interface +func (e *Response) Error() string { + return fmt.Sprintf("%s: %s", e.Status, e.Message) +} + +// AsErr checks the status and returns an err if bad or nil if good +func (e *Response) AsErr() error { + if e.Status != "success" { + return e + } + return nil +} + +// Item Types +const ( + ItemTypeFolder = "folder" + ItemTypeFile = "file" +) + +// Item refers to a file or folder +type Item struct { + Breadcrumbs []Breadcrumb `json:"breadcrumbs"` + CreatedAt int64 `json:"created_at,omitempty"` + ID string `json:"id"` + Link string `json:"link,omitempty"` + Name string `json:"name"` + Size int64 `json:"size,omitempty"` + StreamLink string `json:"stream_link,omitempty"` + Type string `json:"type"` + TranscodeStatus string `json:"transcode_status"` + IP string `json:"ip"` + MimeType string `json:"mime_type"` +} + +// Breadcrumb is part the breadcrumb trail for a file or folder. 
It +// is returned as part of folder/list if required +type Breadcrumb struct { + ID string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + ParentID string `json:"parent_id,omitempty"` +} + +// FolderListResponse is the response to folder/list +type FolderListResponse struct { + Response + Content []Item `json:"content"` + Name string `json:"name,omitempty"` + ParentID string `json:"parent_id,omitempty"` +} + +// FolderCreateResponse is the response to folder/create +type FolderCreateResponse struct { + Response + ID string `json:"id,omitempty"` +} + +// FolderUploadinfoResponse is the response to folder/uploadinfo +type FolderUploadinfoResponse struct { + Response + Token string `json:"token,omitempty"` + URL string `json:"url,omitempty"` +} + +// AccountInfoResponse is the response to account/info +type AccountInfoResponse struct { + Response + CustomerID string `json:"customer_id,omitempty"` + LimitUsed float64 `json:"limit_used,omitempty"` // fraction 0..1 of download traffic limit + PremiumUntil int64 `json:"premium_until,omitempty"` + SpaceUsed float64 `json:"space_used,omitempty"` +} diff --git a/backend/premiumizeme/premiumizeme.go b/backend/premiumizeme/premiumizeme.go new file mode 100644 index 000000000..e9b263185 --- /dev/null +++ b/backend/premiumizeme/premiumizeme.go @@ -0,0 +1,1217 @@ +// Package premiumizeme provides an interface to the premiumize.me +// object storage system. 
+package premiumizeme + +/* FIXME +escaping needs fixing + +Run of rclone info +stringNeedsEscaping = []rune{ + 0x00, 0x0A, 0x0D, 0x22, 0x2F, 0x5C, 0xBF, 0xFE + 0x00, 0x0A, 0x0D, '"', '/', '\\', 0xBF, 0xFE +} +maxFileLength = 255 +canWriteUnnormalized = true +canReadUnnormalized = true +canReadRenormalized = false +canStream = false +*/ + +import ( + "context" + "crypto/rand" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "log" + "net" + "net/http" + "net/url" + "path" + "strings" + "time" + + "github.com/pkg/errors" + "github.com/rclone/rclone/backend/premiumizeme/api" + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/config/configmap" + "github.com/rclone/rclone/fs/config/configstruct" + "github.com/rclone/rclone/fs/config/obscure" + "github.com/rclone/rclone/fs/fserrors" + "github.com/rclone/rclone/fs/fshttp" + "github.com/rclone/rclone/fs/hash" + "github.com/rclone/rclone/lib/dircache" + "github.com/rclone/rclone/lib/oauthutil" + "github.com/rclone/rclone/lib/pacer" + "github.com/rclone/rclone/lib/rest" + "golang.org/x/oauth2" +) + +const ( + rcloneClientID = "658922194" + rcloneEncryptedClientSecret = "B5YIvQoRIhcpAYs8HYeyjb9gK-ftmZEbqdh_gNfc4RgO9Q" + minSleep = 10 * time.Millisecond + maxSleep = 2 * time.Second + decayConstant = 2 // bigger for slower decay, exponential + rootID = "0" // ID of root folder is always this + rootURL = "https://www.premiumize.me/api" +) + +// Globals +var ( + // Description of how to auth for this app + oauthConfig = &oauth2.Config{ + Scopes: nil, + Endpoint: oauth2.Endpoint{ + AuthURL: "https://www.premiumize.me/authorize", + TokenURL: "https://www.premiumize.me/token", + }, + ClientID: rcloneClientID, + ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret), + RedirectURL: oauthutil.RedirectURL, + } +) + +// Register with Fs +func init() { + fs.Register(&fs.RegInfo{ + Name: "premiumizeme", + Description: "premiumize.me", + NewFs: NewFs, + Config: func(name string, m configmap.Mapper) { + err := 
oauthutil.Config("premiumizeme", name, m, oauthConfig) + if err != nil { + log.Fatalf("Failed to configure token: %v", err) + } + }, + Options: []fs.Option{{ + Name: "api_key", + Help: `API Key. + +This is not normally used - use oauth instead. +`, + Hide: fs.OptionHideBoth, + Default: "", + }}, + }) +} + +// Options defines the configuration for this backend +type Options struct { + APIKey string `config:"api_key"` +} + +// Fs represents a remote cloud storage system +type Fs struct { + name string // name of this remote + root string // the path we are working on + opt Options // parsed options + features *fs.Features // optional features + srv *rest.Client // the connection to the server + dirCache *dircache.DirCache // Map of directory path to directory id + pacer *fs.Pacer // pacer for API calls + tokenRenewer *oauthutil.Renew // renew the token on expiry +} + +// Object describes a file +type Object struct { + fs *Fs // what this object is part of + remote string // The remote path + hasMetaData bool // metadata is present and correct + size int64 // size of the object + modTime time.Time // modification time of the object + id string // ID of the object + parentID string // ID of parent directory + mimeType string // Mime type of object + url string // URL to download file +} + +// ------------------------------------------------------------ + +// Name of the remote (as passed into NewFs) +func (f *Fs) Name() string { + return f.name +} + +// Root of the remote (as passed into NewFs) +func (f *Fs) Root() string { + return f.root +} + +// String converts this Fs to a string +func (f *Fs) String() string { + return fmt.Sprintf("premiumize.me root '%s'", f.root) +} + +// Features returns the optional features of this Fs +func (f *Fs) Features() *fs.Features { + return f.features +} + +// parsePath parses a premiumize.me 'url' +func parsePath(path string) (root string) { + root = strings.Trim(path, "/") + return +} + +// retryErrorCodes is a slice of error codes 
that we will retry +var retryErrorCodes = []int{ + 429, // Too Many Requests. + 500, // Internal Server Error + 502, // Bad Gateway + 503, // Service Unavailable + 504, // Gateway Timeout + 509, // Bandwidth Limit Exceeded +} + +// shouldRetry returns a boolean as to whether this resp and err +// deserve to be retried. It returns the err as a convenience +func shouldRetry(resp *http.Response, err error) (bool, error) { + return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err +} + +// substitute reserved characters +func replaceReservedChars(x string) string { + // Backslash for FULLWIDTH REVERSE SOLIDUS + x = strings.Replace(x, "\\", "\", -1) + // Double quote for FULLWIDTH QUOTATION MARK + x = strings.Replace(x, `"`, """, -1) + return x +} + +// restore reserved characters +func restoreReservedChars(x string) string { + // FULLWIDTH QUOTATION MARK for Double quote + x = strings.Replace(x, """, `"`, -1) + // FULLWIDTH REVERSE SOLIDUS for Backslash + x = strings.Replace(x, "\", "\\", -1) + return x +} + +// readMetaDataForPath reads the metadata from the path +func (f *Fs) readMetaDataForPath(ctx context.Context, path string, directoriesOnly bool, filesOnly bool) (info *api.Item, err error) { + // defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err) + leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, path, false) + if err != nil { + if err == fs.ErrorDirNotFound { + return nil, fs.ErrorObjectNotFound + } + return nil, err + } + + lcLeaf := strings.ToLower(leaf) + found, err := f.listAll(directoryID, directoriesOnly, filesOnly, func(item *api.Item) bool { + if strings.ToLower(item.Name) == lcLeaf { + info = item + return true + } + return false + }) + if err != nil { + return nil, err + } + if !found { + return nil, fs.ErrorObjectNotFound + } + return info, nil +} + +// errorHandler parses a non 2xx error response into an error +func errorHandler(resp *http.Response) error { + body, err := rest.ReadBody(resp) + 
if err != nil { + body = nil + } + var e = api.Response{ + Message: string(body), + Status: fmt.Sprintf("%s (%d)", resp.Status, resp.StatusCode), + } + if body != nil { + _ = json.Unmarshal(body, &e) + } + return &e +} + +// Return a url.Values with the api key in +func (f *Fs) baseParams() url.Values { + params := url.Values{} + if f.opt.APIKey != "" { + params.Add("apikey", f.opt.APIKey) + } + return params +} + +// NewFs constructs an Fs from the path, container:path +func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { + ctx := context.Background() + // Parse config into Options struct + opt := new(Options) + err := configstruct.Set(m, opt) + if err != nil { + return nil, err + } + + root = parsePath(root) + + var client *http.Client + var ts *oauthutil.TokenSource + if opt.APIKey == "" { + client, ts, err = oauthutil.NewClient(name, m, oauthConfig) + if err != nil { + return nil, errors.Wrap(err, "failed to configure premiumize.me") + } + } else { + client = fshttp.NewClient(fs.Config) + } + + f := &Fs{ + name: name, + root: root, + opt: *opt, + srv: rest.NewClient(client).SetRoot(rootURL), + pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), + } + f.features = (&fs.Features{ + CaseInsensitive: true, + CanHaveEmptyDirectories: true, + ReadMimeType: true, + }).Fill(f) + f.srv.SetErrorHandler(errorHandler) + + // Renew the token in the background + if ts != nil { + f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error { + _, err := f.About(ctx) + return err + }) + } + + // Get rootID + f.dirCache = dircache.New(root, rootID, f) + + // Find the current root + err = f.dirCache.FindRoot(ctx, false) + if err != nil { + // Assume it is a file + newRoot, remote := dircache.SplitPath(root) + tempF := *f + tempF.dirCache = dircache.New(newRoot, rootID, &tempF) + tempF.root = newRoot + // Make new Fs which is the parent + err = tempF.dirCache.FindRoot(ctx, false) + if err 
!= nil { + // No root so return old f + return f, nil + } + _, err := tempF.newObjectWithInfo(ctx, remote, nil) + if err != nil { + if err == fs.ErrorObjectNotFound { + // File doesn't exist so return old f + return f, nil + } + return nil, err + } + f.features.Fill(&tempF) + // XXX: update the old f here instead of returning tempF, since + // `features` were already filled with functions having *f as a receiver. + // See https://github.com/rclone/rclone/issues/2182 + f.dirCache = tempF.dirCache + f.root = tempF.root + // return an error with an fs which points to the parent + return f, fs.ErrorIsFile + } + return f, nil +} + +// rootSlash returns root with a slash on if it is empty, otherwise empty string +func (f *Fs) rootSlash() string { + if f.root == "" { + return f.root + } + return f.root + "/" +} + +// Return an Object from a path +// +// If it can't be found it returns the error fs.ErrorObjectNotFound. +func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Item) (fs.Object, error) { + o := &Object{ + fs: f, + remote: remote, + } + var err error + if info != nil { + // Set info + err = o.setMetaData(info) + } else { + err = o.readMetaData(ctx) // reads info and meta, returning an error + } + if err != nil { + return nil, err + } + return o, nil +} + +// NewObject finds the Object at remote. If it can't be found +// it returns the error fs.ErrorObjectNotFound. 
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { + return f.newObjectWithInfo(ctx, remote, nil) +} + +// FindLeaf finds a directory of name leaf in the folder with ID pathID +func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) { + // Find the leaf in pathID + found, err = f.listAll(pathID, true, false, func(item *api.Item) bool { + if item.Name == leaf { + pathIDOut = item.ID + return true + } + return false + }) + return pathIDOut, found, err +} + +// CreateDir makes a directory with pathID as parent and name leaf +func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) { + // fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf) + var resp *http.Response + var info api.FolderCreateResponse + opts := rest.Opts{ + Method: "POST", + Path: "/folder/create", + Parameters: f.baseParams(), + MultipartParams: url.Values{ + "name": {replaceReservedChars(leaf)}, + "parent_id": {pathID}, + }, + } + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(&opts, nil, &info) + return shouldRetry(resp, err) + }) + if err != nil { + //fmt.Printf("...Error %v\n", err) + return "", errors.Wrap(err, "CreateDir http") + } + if err = info.AsErr(); err != nil { + return "", errors.Wrap(err, "CreateDir") + } + // fmt.Printf("...Id %q\n", *info.Id) + return info.ID, nil +} + +// list the objects into the function supplied +// +// If directories is set it only sends directories +// User function to process a File item from listAll +// +// Should return true to finish processing +type listAllFn func(*api.Item) bool + +// Lists the directory required calling the user function on each item found +// +// If the user fn ever returns true then it early exits with found = true +func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) { + opts := rest.Opts{ + Method: "GET", + Path: "/folder/list", + Parameters: 
f.baseParams(), + } + opts.Parameters.Set("id", dirID) + opts.Parameters.Set("includebreadcrumbs", "false") + + var result api.FolderListResponse + var resp *http.Response + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(&opts, nil, &result) + return shouldRetry(resp, err) + }) + if err != nil { + return found, errors.Wrap(err, "couldn't list files") + } + if err = result.AsErr(); err != nil { + return found, errors.Wrap(err, "error while listing") + } + for i := range result.Content { + item := &result.Content[i] + if item.Type == api.ItemTypeFolder { + if filesOnly { + continue + } + } else if item.Type == api.ItemTypeFile { + if directoriesOnly { + continue + } + } else { + fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type) + continue + } + item.Name = restoreReservedChars(item.Name) + if fn(item) { + found = true + break + } + } + + return +} + +// List the objects and directories in dir into entries. The +// entries can be returned in any order but should be for a +// complete directory. +// +// dir should be "" to list the root, and should not have +// trailing slashes. +// +// This should return ErrDirNotFound if the directory isn't +// found. 
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { + err = f.dirCache.FindRoot(ctx, false) + if err != nil { + return nil, err + } + directoryID, err := f.dirCache.FindDir(ctx, dir, false) + if err != nil { + return nil, err + } + var iErr error + _, err = f.listAll(directoryID, false, false, func(info *api.Item) bool { + remote := path.Join(dir, info.Name) + if info.Type == api.ItemTypeFolder { + // cache the directory ID for later lookups + f.dirCache.Put(remote, info.ID) + d := fs.NewDir(remote, time.Unix(info.CreatedAt, 0)).SetID(info.ID) + entries = append(entries, d) + } else if info.Type == api.ItemTypeFile { + o, err := f.newObjectWithInfo(ctx, remote, info) + if err != nil { + iErr = err + return true + } + entries = append(entries, o) + } + return false + }) + if err != nil { + return nil, err + } + if iErr != nil { + return nil, iErr + } + return entries, nil +} + +// Creates from the parameters passed in a half finished Object which +// must have setMetaData called on it +// +// Returns the object, leaf, directoryID and error +// +// Used to create new objects +func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) { + // Create the directory for the object if it doesn't exist + leaf, directoryID, err = f.dirCache.FindRootAndPath(ctx, remote, true) + if err != nil { + return + } + // Temporary Object under construction + o = &Object{ + fs: f, + remote: remote, + } + return o, leaf, directoryID, nil +} + +// Put the object +// +// Copy the reader in to the new object which is returned +// +// The new object may have been created if an error is returned +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + existingObj, err := f.newObjectWithInfo(ctx, src.Remote(), nil) + switch err { + case nil: + return existingObj, existingObj.Update(ctx, in, src, 
options...) + case fs.ErrorObjectNotFound: + // Not found so create it + return f.PutUnchecked(ctx, in, src) + default: + return nil, err + } +} + +// PutUnchecked the object into the container +// +// This will produce an error if the object already exists +// +// Copy the reader in to the new object which is returned +// +// The new object may have been created if an error is returned +func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + remote := src.Remote() + size := src.Size() + modTime := src.ModTime(ctx) + + o, _, _, err := f.createObject(ctx, remote, modTime, size) + if err != nil { + return nil, err + } + return o, o.Update(ctx, in, src, options...) +} + +// Mkdir creates the container if it doesn't exist +func (f *Fs) Mkdir(ctx context.Context, dir string) error { + err := f.dirCache.FindRoot(ctx, true) + if err != nil { + return err + } + if dir != "" { + _, err = f.dirCache.FindDir(ctx, dir, true) + } + return err +} + +// purgeCheck removes the root directory, if check is set then it +// refuses to do so if it has anything in +func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error { + root := path.Join(f.root, dir) + if root == "" { + return errors.New("can't purge root directory") + } + dc := f.dirCache + err := dc.FindRoot(ctx, false) + if err != nil { + return err + } + rootID, err := dc.FindDir(ctx, dir, false) + if err != nil { + return err + } + + // need to check if empty as it will delete recursively by default + if check { + found, err := f.listAll(rootID, false, false, func(item *api.Item) bool { + return true + }) + if err != nil { + return errors.Wrap(err, "purgeCheck") + } + if found { + return fs.ErrorDirectoryNotEmpty + } + } + + opts := rest.Opts{ + Method: "POST", + Path: "/folder/delete", + MultipartParams: url.Values{ + "id": {rootID}, + }, + Parameters: f.baseParams(), + } + var resp *http.Response + var result api.Response + err = 
f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(&opts, nil, &result) + return shouldRetry(resp, err) + }) + if err != nil { + return errors.Wrap(err, "rmdir failed") + } + if err = result.AsErr(); err != nil { + return errors.Wrap(err, "rmdir") + } + f.dirCache.FlushDir(dir) + if err != nil { + return err + } + return nil +} + +// Rmdir deletes the root folder +// +// Returns an error if it isn't empty +func (f *Fs) Rmdir(ctx context.Context, dir string) error { + return f.purgeCheck(ctx, dir, true) +} + +// Precision return the precision of this Fs +func (f *Fs) Precision() time.Duration { + return fs.ModTimeNotSupported +} + +// Purge deletes all the files and the container +// +// Optional interface: Only implement this if you have a way of +// deleting all the files quicker than just running Remove() on the +// result of List() +func (f *Fs) Purge(ctx context.Context) error { + return f.purgeCheck(ctx, "", false) +} + +// move a file or folder +// +// This is complicated by the fact that there is an API to move files +// between directories and a separate one to rename them. We try to +// call the minimum number of API calls. +func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDirectoryID, newDirectoryID string) (err error) { + newLeaf = replaceReservedChars(newLeaf) + oldLeaf = replaceReservedChars(oldLeaf) + doRenameLeaf := oldLeaf != newLeaf + doMove := oldDirectoryID != newDirectoryID + + // Now rename the leaf to a temporary name if we are moving to + // another directory to make sure we don't overwrite something + // in the destination directory by accident + if doRenameLeaf && doMove { + tmpLeaf := newLeaf + "." 
+ randomString(8) + err = f.renameLeaf(ctx, isFile, id, tmpLeaf) + if err != nil { + return errors.Wrap(err, "Move rename leaf") + } + } + + // Move the object to a new directory (with the existing name) + // if required + if doMove { + opts := rest.Opts{ + Method: "POST", + Path: "/folder/paste", + Parameters: f.baseParams(), + MultipartParams: url.Values{ + "id": {newDirectoryID}, + }, + } + if isFile { + opts.MultipartParams.Set("files[]", id) + } else { + opts.MultipartParams.Set("folders[]", id) + } + //replacedLeaf := replaceReservedChars(leaf) + var resp *http.Response + var result api.Response + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(&opts, nil, &result) + return shouldRetry(resp, err) + }) + if err != nil { + return errors.Wrap(err, "Move http") + } + if err = result.AsErr(); err != nil { + return errors.Wrap(err, "Move") + } + } + + // Rename the leaf to its final name if required + if doRenameLeaf { + err = f.renameLeaf(ctx, isFile, id, newLeaf) + if err != nil { + return errors.Wrap(err, "Move rename leaf") + } + } + + return nil +} + +// Move src to this remote using server side move operations. 
+// +// This is stored with the remote path given +// +// It returns the destination Object and a possible error +// +// Will only be called if src.Fs().Name() == f.Name() +// +// If it isn't possible then return fs.ErrorCantMove +func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { + srcObj, ok := src.(*Object) + if !ok { + fs.Debugf(src, "Can't move - not same remote type") + return nil, fs.ErrorCantMove + } + + // Create temporary object + dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size) + if err != nil { + return nil, err + } + + // Do the move + err = f.move(ctx, true, srcObj.id, path.Base(srcObj.remote), leaf, srcObj.parentID, directoryID) + if err != nil { + return nil, err + } + + err = dstObj.readMetaData(ctx) + if err != nil { + return nil, err + } + return dstObj, nil +} + +// DirMove moves src, srcRemote to this remote at dstRemote +// using server side move operations. +// +// Will only be called if src.Fs().Name() == f.Name() +// +// If it isn't possible then return fs.ErrorCantDirMove +// +// If destination exists then return fs.ErrorDirExists +func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { + srcFs, ok := src.(*Fs) + if !ok { + fs.Debugf(srcFs, "Can't move directory - not same remote type") + return fs.ErrorCantDirMove + } + srcPath := path.Join(srcFs.root, srcRemote) + dstPath := path.Join(f.root, dstRemote) + + // Refuse to move to or from the root + if srcPath == "" || dstPath == "" { + fs.Debugf(src, "DirMove error: Can't move root") + return errors.New("can't move root directory") + } + + // find the root src directory + err := srcFs.dirCache.FindRoot(ctx, false) + if err != nil { + return err + } + + // find the root dst directory + if dstRemote != "" { + err = f.dirCache.FindRoot(ctx, true) + if err != nil { + return err + } + } else { + if f.dirCache.FoundRoot() { + return fs.ErrorDirExists + } + } + + // Find ID of dst 
parent, creating subdirs if necessary + var leaf, directoryID string + findPath := dstRemote + if dstRemote == "" { + findPath = f.root + } + leaf, directoryID, err = f.dirCache.FindPath(ctx, findPath, true) + if err != nil { + return err + } + + // Check destination does not exist + if dstRemote != "" { + _, err = f.dirCache.FindDir(ctx, dstRemote, false) + if err == fs.ErrorDirNotFound { + // OK + } else if err != nil { + return err + } else { + return fs.ErrorDirExists + } + } + + // Find ID of src + srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false) + if err != nil { + return err + } + + // Find ID of src parent, not creating subdirs + var srcLeaf, srcDirectoryID string + findPath = srcRemote + if srcRemote == "" { + findPath = srcFs.root + } + srcLeaf, srcDirectoryID, err = srcFs.dirCache.FindPath(ctx, findPath, false) + if err != nil { + return err + } + + // Do the move + err = f.move(ctx, false, srcID, srcLeaf, leaf, srcDirectoryID, directoryID) + if err != nil { + return err + } + srcFs.dirCache.FlushDir(srcRemote) + return nil +} + +// PublicLink adds a "readable by anyone with link" permission on the given file or folder. 
+func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) { + _, err := f.dirCache.FindDir(ctx, remote, false) + if err == nil { + return "", fs.ErrorCantShareDirectories + } + o, err := f.NewObject(ctx, remote) + if err != nil { + return "", err + } + return o.(*Object).url, nil +} + +// About gets quota information +func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) { + var resp *http.Response + var info api.AccountInfoResponse + opts := rest.Opts{ + Method: "POST", + Path: "/account/info", + Parameters: f.baseParams(), + } + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(&opts, nil, &info) + return shouldRetry(resp, err) + }) + if err != nil { + return nil, errors.Wrap(err, "CreateDir http") + } + if err = info.AsErr(); err != nil { + return nil, errors.Wrap(err, "CreateDir") + } + usage = &fs.Usage{ + Used: fs.NewUsageValue(int64(info.SpaceUsed)), + } + return usage, nil +} + +// DirCacheFlush resets the directory cache - used in testing as an +// optional interface +func (f *Fs) DirCacheFlush() { + f.dirCache.ResetRoot() +} + +// Hashes returns the supported hash sets. 
+func (f *Fs) Hashes() hash.Set { + return hash.Set(hash.None) +} + +// ------------------------------------------------------------ + +// Fs returns the parent Fs +func (o *Object) Fs() fs.Info { + return o.fs +} + +// Return a string version +func (o *Object) String() string { + if o == nil { + return "" + } + return o.remote +} + +// Remote returns the remote path +func (o *Object) Remote() string { + return o.remote +} + +// srvPath returns a path for use in server +func (o *Object) srvPath() string { + return replaceReservedChars(o.fs.rootSlash() + o.remote) +} + +// Hash returns the SHA-1 of an object returning a lowercase hex string +func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { + return "", hash.ErrUnsupported +} + +// Size returns the size of an object in bytes +func (o *Object) Size() int64 { + err := o.readMetaData(context.TODO()) + if err != nil { + fs.Logf(o, "Failed to read metadata: %v", err) + return 0 + } + return o.size +} + +// setMetaData sets the metadata from info +func (o *Object) setMetaData(info *api.Item) (err error) { + if info.Type != "file" { + return errors.Wrapf(fs.ErrorNotAFile, "%q is %q", o.remote, info.Type) + } + o.hasMetaData = true + o.size = info.Size + o.modTime = time.Unix(info.CreatedAt, 0) + o.id = info.ID + o.mimeType = info.MimeType + o.url = info.Link + return nil +} + +// readMetaData gets the metadata if it hasn't already been fetched +// +// it also sets the info +func (o *Object) readMetaData(ctx context.Context) (err error) { + if o.hasMetaData { + return nil + } + info, err := o.fs.readMetaDataForPath(ctx, o.remote, false, true) + if err != nil { + return err + } + return o.setMetaData(info) +} + +// ModTime returns the modification time of the object +// +// +// It attempts to read the objects mtime and if that isn't present the +// LastModified returned in the http headers +func (o *Object) ModTime(ctx context.Context) time.Time { + err := o.readMetaData(ctx) + if err != nil { + 
fs.Logf(o, "Failed to read metadata: %v", err) + return time.Now() + } + return o.modTime +} + +// SetModTime sets the modification time of the local fs object +func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { + return fs.ErrorCantSetModTime +} + +// Storable returns a boolean showing whether this object storable +func (o *Object) Storable() bool { + return true +} + +// Open an object for read +func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { + if o.url == "" { + return nil, errors.New("can't download - no URL") + } + fs.FixRangeOption(options, o.size) + var resp *http.Response + opts := rest.Opts{ + Path: "", + RootURL: o.url, + Method: "GET", + Options: options, + } + err = o.fs.pacer.Call(func() (bool, error) { + resp, err = o.fs.srv.Call(&opts) + return shouldRetry(resp, err) + }) + if err != nil { + return nil, err + } + return resp.Body, err +} + +// metaHash returns a rough hash of metadata to detect if object has been updated +func (o *Object) metaHash() string { + if !o.hasMetaData { + return "" + } + return fmt.Sprintf("remote=%q, size=%d, modTime=%v, id=%q, mimeType=%q", o.remote, o.size, o.modTime, o.id, o.mimeType) +} + +// randomString returns a string with 8*bytes bits of entropy +func randomString(bytes int) string { + var pw = make([]byte, bytes) + _, _ = rand.Read(pw) + return base64.RawURLEncoding.EncodeToString(pw) +} + +// Update the object with the contents of the io.Reader, modTime and size +// +// If existing is set then it updates the object rather than creating a new one +// +// The new object may have been created if an error is returned +func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { + remote := o.Remote() + size := src.Size() + + // Create the directory for the object if it doesn't exist + leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, remote, true) + if err != nil { + 
return err + } + leaf = replaceReservedChars(leaf) + + var resp *http.Response + var info api.FolderUploadinfoResponse + opts := rest.Opts{ + Method: "POST", + Path: "/folder/uploadinfo", + Parameters: o.fs.baseParams(), + MultipartParams: url.Values{ + "id": {directoryID}, + }, + } + err = o.fs.pacer.Call(func() (bool, error) { + resp, err = o.fs.srv.CallJSON(&opts, nil, &info) + if err != nil { + return shouldRetry(resp, err) + } + // Just check the download URL resolves - sometimes + // the URLs returned by premiumize.me don't resolve so + // this needs a retry. + var u *url.URL + u, err = url.Parse(info.URL) + if err != nil { + return true, errors.Wrap(err, "failed to parse download URL") + } + _, err = net.LookupIP(u.Hostname()) + if err != nil { + return true, errors.Wrap(err, "failed to resolve download URL") + } + return false, nil + }) + if err != nil { + return errors.Wrap(err, "upload get URL http") + } + if err = info.AsErr(); err != nil { + return errors.Wrap(err, "upload get URL") + } + + // if file exists then rename it out the way otherwise uploads can fail + uploaded := false + var oldID = o.id + if o.hasMetaData { + newLeaf := leaf + "." 
+ randomString(8) + fs.Debugf(o, "Moving old file out the way to %q", newLeaf) + err = o.fs.renameLeaf(ctx, true, oldID, newLeaf) + if err != nil { + return errors.Wrap(err, "upload rename old file") + } + defer func() { + // on failed upload rename old file back + if !uploaded { + fs.Debugf(o, "Renaming old file back (from %q to %q) since upload failed", newLeaf, leaf) + newErr := o.fs.renameLeaf(ctx, true, oldID, leaf) + if newErr != nil && err == nil { + err = errors.Wrap(newErr, "upload renaming old file back") + } + } + }() + } + + opts = rest.Opts{ + Method: "POST", + RootURL: info.URL, + Body: in, + MultipartParams: url.Values{ + "token": {info.Token}, + }, + MultipartContentName: "file", // ..name of the parameter which is the attached file + MultipartFileName: leaf, // ..name of the file for the attached file + ContentLength: &size, + } + var result api.Response + err = o.fs.pacer.CallNoRetry(func() (bool, error) { + resp, err = o.fs.srv.CallJSON(&opts, nil, &result) + return shouldRetry(resp, err) + }) + if err != nil { + return errors.Wrap(err, "upload file http") + } + if err = result.AsErr(); err != nil { + return errors.Wrap(err, "upload file") + } + + // on successful upload, remove old file if it exists + uploaded = true + if o.hasMetaData { + fs.Debugf(o, "Removing old file") + err := o.fs.remove(ctx, oldID) + if err != nil { + return errors.Wrap(err, "upload remove old file") + } + } + + o.hasMetaData = false + return o.readMetaData(ctx) +} + +// Rename the leaf of a file or directory in a directory +func (f *Fs) renameLeaf(ctx context.Context, isFile bool, id string, newLeaf string) (err error) { + opts := rest.Opts{ + Method: "POST", + MultipartParams: url.Values{ + "id": {id}, + "name": {newLeaf}, + }, + Parameters: f.baseParams(), + } + if isFile { + opts.Path = "/item/rename" + } else { + opts.Path = "/folder/rename" + } + var resp *http.Response + var result api.Response + err = f.pacer.Call(func() (bool, error) { + resp, err = 
f.srv.CallJSON(&opts, nil, &result) + return shouldRetry(resp, err) + }) + if err != nil { + return errors.Wrap(err, "rename http") + } + if err = result.AsErr(); err != nil { + return errors.Wrap(err, "rename") + } + return nil +} + +// Remove an object by ID +func (f *Fs) remove(ctx context.Context, id string) (err error) { + opts := rest.Opts{ + Method: "POST", + Path: "/item/delete", + MultipartParams: url.Values{ + "id": {id}, + }, + Parameters: f.baseParams(), + } + var resp *http.Response + var result api.Response + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(&opts, nil, &result) + return shouldRetry(resp, err) + }) + if err != nil { + return errors.Wrap(err, "remove http") + } + if err = result.AsErr(); err != nil { + return errors.Wrap(err, "remove") + } + return nil +} + +// Remove an object +func (o *Object) Remove(ctx context.Context) error { + err := o.readMetaData(ctx) + if err != nil { + return errors.Wrap(err, "Remove: Failed to read metadata") + } + return o.fs.remove(ctx, o.id) +} + +// MimeType of an Object if known, "" otherwise +func (o *Object) MimeType(ctx context.Context) string { + return o.mimeType +} + +// ID returns the ID of the Object if known, or "" if not +func (o *Object) ID() string { + return o.id +} + +// Check the interfaces are satisfied +var ( + _ fs.Fs = (*Fs)(nil) + _ fs.Purger = (*Fs)(nil) + _ fs.Mover = (*Fs)(nil) + _ fs.DirMover = (*Fs)(nil) + _ fs.DirCacheFlusher = (*Fs)(nil) + _ fs.Abouter = (*Fs)(nil) + _ fs.PublicLinker = (*Fs)(nil) + _ fs.Object = (*Object)(nil) + _ fs.MimeTyper = (*Object)(nil) + _ fs.IDer = (*Object)(nil) +) diff --git a/backend/premiumizeme/premiumizeme_test.go b/backend/premiumizeme/premiumizeme_test.go new file mode 100644 index 000000000..de5bfa2a9 --- /dev/null +++ b/backend/premiumizeme/premiumizeme_test.go @@ -0,0 +1,17 @@ +// Test filesystem interface +package premiumizeme_test + +import ( + "testing" + + "github.com/rclone/rclone/backend/premiumizeme" + 
"github.com/rclone/rclone/fstest/fstests" +) + +// TestIntegration runs integration tests against the remote +func TestIntegration(t *testing.T) { + fstests.Run(t, &fstests.Opt{ + RemoteName: "TestPremiumizeMe:", + NilObject: (*premiumizeme.Object)(nil), + }) +} diff --git a/bin/make_manual.py b/bin/make_manual.py index 0ab298646..e3f0368fb 100755 --- a/bin/make_manual.py +++ b/bin/make_manual.py @@ -47,6 +47,7 @@ docs = [ "qingstor.md", "swift.md", "pcloud.md", + "premiumizeme.md", "sftp.md", "union.md", "webdav.md", diff --git a/docs/content/about.md b/docs/content/about.md index 4e134b715..54376bc67 100644 --- a/docs/content/about.md +++ b/docs/content/about.md @@ -45,6 +45,7 @@ Rclone is a command line program to sync files and directories to and from: * {{< provider name="Oracle Cloud Storage" home="https://cloud.oracle.com/storage-opc" config="/swift/" >}} * {{< provider name="ownCloud" home="https://owncloud.org/" config="/webdav/#owncloud" >}} * {{< provider name="pCloud" home="https://www.pcloud.com/" config="/pcloud/" >}} +* {{< provider name="premiumize.me" home="https://premiumize.me/" config="/premiumizeme/" >}} * {{< provider name="put.io" home="https://put.io/" config="/webdav/#put-io" >}} * {{< provider name="QingStor" home="https://www.qingcloud.com/products/storage" config="/qingstor/" >}} * {{< provider name="Rackspace Cloud Files" home="https://www.rackspace.com/cloud/files" config="/swift/" >}} diff --git a/docs/content/docs.md b/docs/content/docs.md index 3025452fb..b855809ab 100644 --- a/docs/content/docs.md +++ b/docs/content/docs.md @@ -43,6 +43,7 @@ See the following for detailed instructions for * [Openstack Swift / Rackspace Cloudfiles / Memset Memstore](/swift/) * [OpenDrive](/opendrive/) * [Pcloud](/pcloud/) + * [premiumize.me](/premiumizeme/) * [QingStor](/qingstor/) * [SFTP](/sftp/) * [Union](/union/) diff --git a/docs/content/overview.md b/docs/content/overview.md index 29f87a1e4..16904b8f6 100644 --- a/docs/content/overview.md +++ 
b/docs/content/overview.md @@ -37,6 +37,7 @@ Here is an overview of the major features of each cloud storage system. | OpenDrive | MD5 | Yes | Yes | No | - | | Openstack Swift | MD5 | Yes | No | No | R/W | | pCloud | MD5, SHA1 | Yes | No | No | W | +| premiumize.me | - | No | Yes | No | R | | QingStor | MD5 | No | No | No | R/W | | SFTP | MD5, SHA1 ‡ | Yes | Depends | No | - | | WebDAV | MD5, SHA1 ††| Yes ††† | Depends | No | - | @@ -152,6 +153,7 @@ operations more efficient. | OpenDrive | Yes | Yes | Yes | Yes | No | No | No | No | No | | Openstack Swift | Yes † | Yes | No | No | No | Yes | Yes | No [#2178](https://github.com/rclone/rclone/issues/2178) | Yes | | pCloud | Yes | Yes | Yes | Yes | Yes | No | No | No [#2178](https://github.com/rclone/rclone/issues/2178) | Yes | +| premiumize.me | Yes | No | Yes | Yes | No | No | No | Yes | Yes | | QingStor | No | Yes | No | No | No | Yes | No | No [#2178](https://github.com/rclone/rclone/issues/2178) | No | | SFTP | No | No | Yes | Yes | No | No | Yes | No [#2178](https://github.com/rclone/rclone/issues/2178) | Yes | | WebDAV | Yes | Yes | Yes | Yes | No | No | Yes ‡ | No [#2178](https://github.com/rclone/rclone/issues/2178) | Yes | diff --git a/docs/content/premiumizeme.md b/docs/content/premiumizeme.md new file mode 100644 index 000000000..2d66e5cc1 --- /dev/null +++ b/docs/content/premiumizeme.md @@ -0,0 +1,118 @@ +--- +title: "premiumize.me" +description: "Rclone docs for premiumize.me" +date: "2019-08-10" +--- + + premiumize.me +----------------------------------------- + +Paths are specified as `remote:path` + +Paths may be as deep as required, eg `remote:directory/subdirectory`. + +The initial setup for [premiumize.me](https://premiumize.me/) involves getting a token from premiumize.me which you +need to do in your browser. `rclone config` walks you through it. + +Here is an example of how to make a remote called `remote`. 
First run: + + rclone config + +This will guide you through an interactive setup process: + +``` +No remotes found - make a new one +n) New remote +s) Set configuration password +q) Quit config +n/s/q> n +name> remote +Type of storage to configure. +Enter a string value. Press Enter for the default (""). +Choose a number from below, or type in your own value +[snip] +30 / premiumize.me + \ "premiumizeme" +Storage> premiumizeme +** See help for premiumizeme backend at: https://rclone.org/premiumizeme/ ** + +Remote config +Use auto config? + * Say Y if not sure + * Say N if you are working on a remote or headless machine +y) Yes +n) No +y/n> y +If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth +Log in and authorize rclone for access +Waiting for code... +Got code +-------------------- +[remote] +type = premiumizeme +token = {"access_token":"XXX","token_type":"Bearer","refresh_token":"XXX","expiry":"2029-08-07T18:44:15.548915378+01:00"} +-------------------- +y) Yes this is OK +e) Edit this remote +d) Delete this remote +y/e/d> +``` + +See the [remote setup docs](/remote_setup/) for how to set it up on a +machine with no Internet browser available. + +Note that rclone runs a webserver on your local machine to collect the +token as returned from premiumize.me. This only runs from the moment it opens +your browser to the moment you get back the verification code. This +is on `http://127.0.0.1:53682/` and it may require you to unblock +it temporarily if you are running a host firewall. 
+ +Once configured you can then use `rclone` like this, + +List directories in top level of your premiumize.me + + rclone lsd remote: + +List all the files in your premiumize.me + + rclone ls remote: + +To copy a local directory to a premiumize.me directory called backup + + rclone copy /home/source remote:backup + +### Modified time and hashes ### + +premiumize.me does not support modification times or hashes, therefore +syncing will default to `--size-only` checking. Note that using +`--update` will work. + + +### Standard Options + +Here are the standard options specific to premiumizeme (premiumize.me). + +#### --premiumizeme-api-key + +API Key. + +This is not normally used - use oauth instead. + + +- Config: api_key +- Env Var: RCLONE_PREMIUMIZEME_API_KEY +- Type: string +- Default: "" + + + +### Limitations ### + +Note that premiumize.me is case insensitive so you can't have a file called +"Hello.doc" and one called "hello.doc". + +premiumize.me file names can't have the `\` or `"` characters in. +rclone maps these to and from identical looking unicode equivalents +`\` and `"` + +premiumize.me only supports filenames up to 255 characters in length. diff --git a/docs/layouts/chrome/navbar.html b/docs/layouts/chrome/navbar.html index f8ed8907a..63369f471 100644 --- a/docs/layouts/chrome/navbar.html +++ b/docs/layouts/chrome/navbar.html @@ -79,6 +79,7 @@
  • QingStor
  • Openstack Swift
  • pCloud
  • +
  • premiumize.me
  • SFTP
  • Union (merge backends)
  • WebDAV
  • diff --git a/fstest/test_all/config.yaml b/fstest/test_all/config.yaml index 1724b58dd..908697ebf 100644 --- a/fstest/test_all/config.yaml +++ b/fstest/test_all/config.yaml @@ -157,3 +157,7 @@ backends: remote: "TestKoofr:" subdir: false fastlist: false + - backend: "premiumizeme" + remote: "TestPremiumizeMe:" + subdir: false + fastlist: false