From ba1daea07263f1e4b65898141cc3e9a0425e8803 Mon Sep 17 00:00:00 2001 From: Ivan Andreev Date: Mon, 9 Sep 2019 21:56:16 +0100 Subject: [PATCH] mailru: backend for mail.ru --- README.md | 1 + backend/all/all.go | 1 + backend/mailru/api/bin.go | 107 ++ backend/mailru/api/helpers.go | 225 +++ backend/mailru/api/m1.go | 248 ++++ backend/mailru/mailru.go | 2378 +++++++++++++++++++++++++++++++ backend/mailru/mailru_test.go | 18 + bin/make_manual.py | 1 + docs/content/about.md | 1 + docs/content/docs.md | 1 + docs/content/mailru.md | 280 ++++ docs/content/overview.md | 4 + docs/layouts/chrome/navbar.html | 1 + fstest/test_all/config.yaml | 4 + 14 files changed, 3270 insertions(+) create mode 100644 backend/mailru/api/bin.go create mode 100644 backend/mailru/api/helpers.go create mode 100644 backend/mailru/api/m1.go create mode 100644 backend/mailru/mailru.go create mode 100644 backend/mailru/mailru_test.go create mode 100644 docs/content/mailru.md diff --git a/README.md b/README.md index 9b7743716..ca2617c78 100644 --- a/README.md +++ b/README.md @@ -41,6 +41,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and * Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/) * IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3) * Koofr [:page_facing_up:](https://rclone.org/koofr/) + * Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/) * Memset Memstore [:page_facing_up:](https://rclone.org/swift/) * Mega [:page_facing_up:](https://rclone.org/mega/) * Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/) diff --git a/backend/all/all.go b/backend/all/all.go index 6ccf6163c..cbf6ee4d6 100644 --- a/backend/all/all.go +++ b/backend/all/all.go @@ -20,6 +20,7 @@ import ( _ "github.com/rclone/rclone/backend/jottacloud" _ "github.com/rclone/rclone/backend/koofr" _ "github.com/rclone/rclone/backend/local" + _ "github.com/rclone/rclone/backend/mailru" _ "github.com/rclone/rclone/backend/mega" _ "github.com/rclone/rclone/backend/onedrive" _ "github.com/rclone/rclone/backend/opendrive" diff --git a/backend/mailru/api/bin.go b/backend/mailru/api/bin.go new file mode 100644 index 000000000..de8d1dbb3 --- /dev/null +++ b/backend/mailru/api/bin.go @@ -0,0 +1,107 @@ +package api + +// BIN protocol constants +const ( + BinContentType = "application/x-www-form-urlencoded" + TreeIDLength = 12 + DunnoNodeIDLength = 16 +) + +// Operations in binary protocol +const ( + OperationAddFile = 103 // 0x67 + OperationRename = 105 // 0x69 + OperationCreateFolder = 106 // 0x6A + OperationFolderList = 117 // 0x75 + OperationSharedFoldersList = 121 // 0x79 + // TODO investigate opcodes below + Operation154MaybeItemInfo = 154 // 0x9A + Operation102MaybeAbout = 102 // 0x66 + Operation104MaybeDelete = 104 // 0x68 +) + +// CreateDir protocol constants +const ( + MkdirResultOK = 0 + MkdirResultSourceNotExists = 1 + MkdirResultAlreadyExists = 4 + MkdirResultExistsDifferentCase = 9 + MkdirResultInvalidName = 10 + MkdirResultFailed254 = 254 +) + +// Move result codes +const ( + MoveResultOK = 0 + MoveResultSourceNotExists = 1 + MoveResultFailed002 = 2 + MoveResultAlreadyExists = 4 + MoveResultFailed005 = 5 + MoveResultFailed254 = 254 +) + +// AddFile result codes +const ( + AddResultOK = 0 + AddResultError01 = 1 + AddResultDunno04 = 4 + AddResultWrongPath = 5 + AddResultNoFreeSpace = 7 + AddResultDunno09 = 9 + AddResultInvalidName = 10 + AddResultNotModified = 12 + AddResultFailedA = 253 + AddResultFailedB = 254 +) + +// List request options +const ( + 
ListOptTotalSpace = 1 + ListOptDelete = 2 + ListOptFingerprint = 4 + ListOptUnknown8 = 8 + ListOptUnknown16 = 16 + ListOptFolderSize = 32 + ListOptUsedSpace = 64 + ListOptUnknown128 = 128 + ListOptUnknown256 = 256 +) + +// ListOptDefaults ... +const ListOptDefaults = ListOptUnknown128 | ListOptUnknown256 | ListOptFolderSize | ListOptTotalSpace | ListOptUsedSpace + +// List parse flags +const ( + ListParseDone = 0 + ListParseReadItem = 1 + ListParsePin = 2 + ListParsePinUpper = 3 + ListParseUnknown15 = 15 +) + +// List operation results +const ( + ListResultOK = 0 + ListResultNotExists = 1 + ListResultDunno02 = 2 + ListResultDunno03 = 3 + ListResultAlreadyExists04 = 4 + ListResultDunno05 = 5 + ListResultDunno06 = 6 + ListResultDunno07 = 7 + ListResultDunno08 = 8 + ListResultAlreadyExists09 = 9 + ListResultDunno10 = 10 + ListResultDunno11 = 11 + ListResultDunno12 = 12 + ListResultFailedB = 253 + ListResultFailedA = 254 +) + +// Directory item types +const ( + ListItemMountPoint = 0 + ListItemFile = 1 + ListItemFolder = 2 + ListItemSharedFolder = 3 +) diff --git a/backend/mailru/api/helpers.go b/backend/mailru/api/helpers.go new file mode 100644 index 000000000..ac8574411 --- /dev/null +++ b/backend/mailru/api/helpers.go @@ -0,0 +1,225 @@ +package api + +// BIN protocol helpers + +import ( + "bufio" + "bytes" + "encoding/binary" + "io" + "log" + "time" + + "github.com/pkg/errors" + "github.com/rclone/rclone/lib/readers" +) + +// protocol errors +var ( + ErrorPrematureEOF = errors.New("Premature EOF") + ErrorInvalidLength = errors.New("Invalid length") + ErrorZeroTerminate = errors.New("String must end with zero") +) + +// BinWriter is a binary protocol writer +type BinWriter struct { + b *bytes.Buffer // growing byte buffer + a []byte // temporary buffer for next varint +} + +// NewBinWriter creates a binary protocol helper +func NewBinWriter() *BinWriter { + return &BinWriter{ + b: new(bytes.Buffer), + a: make([]byte, binary.MaxVarintLen64), + } +} + +// Bytes returns binary data +func (w *BinWriter) Bytes() []byte { + return w.b.Bytes() +} + +// Reader returns io.Reader with binary data +func (w *BinWriter) Reader() io.Reader { + return bytes.NewReader(w.b.Bytes()) +} + +// WritePu16 writes a short as unsigned varint +func (w *BinWriter) WritePu16(val int) { + if val < 0 || val > 65535 { + log.Fatalf("Invalid UInt16 %v", val) + } + w.WritePu64(int64(val)) +} + +// WritePu32 writes a signed long as unsigned varint +func (w *BinWriter) WritePu32(val int64) { + if val < 0 || val > 4294967295 { + log.Fatalf("Invalid UInt32 %v", val) + } + w.WritePu64(val) +} + +// WritePu64 writes an unsigned (actually, signed) long as unsigned varint +func (w *BinWriter) WritePu64(val int64) { + if val < 0 { + log.Fatalf("Invalid UInt64 %v", val) + } + w.b.Write(w.a[:binary.PutUvarint(w.a, uint64(val))]) +} + +// WriteString writes a zero-terminated string +func (w *BinWriter) WriteString(str string) { + buf := []byte(str) + w.WritePu64(int64(len(buf) + 1)) + w.b.Write(buf) + w.b.WriteByte(0) +} + +// Write writes a byte buffer +func (w *BinWriter) Write(buf []byte) { + w.b.Write(buf) +} + +// WriteWithLength writes a byte buffer prepended with its length as varint +func (w *BinWriter) WriteWithLength(buf []byte) { + w.WritePu64(int64(len(buf))) + w.b.Write(buf) +} + +// BinReader is a binary protocol reader helper +type BinReader struct { + b *bufio.Reader + count *readers.CountingReader + err error // keeps the first error encountered +} + +// NewBinReader creates a binary protocol reader helper +func 
NewBinReader(reader io.Reader) *BinReader {
+	r := &BinReader{}
+	r.count = readers.NewCountingReader(reader)
+	r.b = bufio.NewReader(r.count)
+	return r
+}
+
+// Count returns the number of bytes read
+func (r *BinReader) Count() uint64 {
+	return r.count.BytesRead()
+}
+
+// Error returns the first encountered error or nil
+func (r *BinReader) Error() error {
+	return r.err
+}
+
+// check() keeps the first error encountered in a stream
+func (r *BinReader) check(err error) bool {
+	if err == nil {
+		return true
+	}
+	if r.err == nil {
+		// keep the first error
+		r.err = err
+	}
+	if err != io.EOF {
+		log.Fatalf("Error parsing response: %v", err)
+	}
+	return false
+}
+
+// ReadByteAsInt reads a single byte as uint32, returns -1 for EOF or errors
+func (r *BinReader) ReadByteAsInt() int {
+	if octet, err := r.b.ReadByte(); r.check(err) {
+		return int(octet)
+	}
+	return -1
+}
+
+// ReadByteAsShort reads a single byte as uint16, returns -1 for EOF or errors
+func (r *BinReader) ReadByteAsShort() int16 {
+	if octet, err := r.b.ReadByte(); r.check(err) {
+		return int16(octet)
+	}
+	return -1
+}
+
+// ReadIntSpl reads two bytes as a little-endian uint16, returns -1 for EOF or errors
+func (r *BinReader) ReadIntSpl() int {
+	var val uint16
+	if r.check(binary.Read(r.b, binary.LittleEndian, &val)) {
+		return int(val)
+	}
+	return -1
+}
+
+// ReadULong returns the uint64 equivalent of -1 for EOF or errors
+func (r *BinReader) ReadULong() uint64 {
+	if val, err := binary.ReadUvarint(r.b); r.check(err) {
+		return val
+	}
+	return 0xffffffffffffffff
+}
+
+// ReadPu32 returns -1 for EOF or errors
+func (r *BinReader) ReadPu32() int64 {
+	if val, err := binary.ReadUvarint(r.b); r.check(err) {
+		return int64(val)
+	}
+	return -1
+}
+
+// ReadNBytes reads the given number of bytes, returns invalid data for EOF or errors
+func (r *BinReader) ReadNBytes(len int) []byte {
+	buf := make([]byte, len)
+	// Use io.ReadFull here: a bare bufio Read may legitimately return fewer
+	// bytes than requested, which must not be mistaken for a premature EOF.
+	_, err := io.ReadFull(r.b, buf)
+	if err == io.ErrUnexpectedEOF {
+		err = ErrorPrematureEOF
+	}
+	r.check(err)
+	return buf
+}
+
+// ReadBytesByLength reads the buffer length and its bytes
+func (r *BinReader) ReadBytesByLength() []byte {
+	len := r.ReadPu32()
+	if len < 0 {
+		r.check(ErrorInvalidLength)
+		return []byte{}
+	}
+	return r.ReadNBytes(int(len))
+}
+
+// ReadString reads a zero-terminated string with length
+func (r *BinReader) ReadString() string {
+	len := int(r.ReadPu32())
+	if len < 1 {
+		r.check(ErrorInvalidLength)
+		return ""
+	}
+	buf := make([]byte, len-1)
+	// io.ReadFull for the same reason as in ReadNBytes
+	_, err := io.ReadFull(r.b, buf)
+	if err == io.ErrUnexpectedEOF {
+		err = ErrorPrematureEOF
+	}
+	if !r.check(err) {
+		return ""
+	}
+	zeroByte, err := r.b.ReadByte()
+	if !r.check(err) {
+		return ""
+	}
+	if zeroByte != 0 {
+		r.check(ErrorZeroTerminate)
+		return ""
+	}
+	return string(buf)
+}
+
+// ReadDate reads a Unix encoded time
+func (r *BinReader) ReadDate() time.Time {
+	return time.Unix(r.ReadPu32(), 0)
+}
diff --git a/backend/mailru/api/m1.go b/backend/mailru/api/m1.go
new file mode 100644
index 000000000..575510583
--- /dev/null
+++ b/backend/mailru/api/m1.go
@@ -0,0 +1,248 @@
+package api
+
+import (
+	"fmt"
+)
+
+// M1 protocol constants and structures
+const (
+	APIServerURL      = "https://cloud.mail.ru"
+	PublicLinkURL     = "https://cloud.mail.ru/public/"
+	DispatchServerURL = "https://dispatcher.cloud.mail.ru"
+	OAuthURL          = "https://o2.mail.ru/token"
+	OAuthClientID     = "cloud-win"
+)
+
+// ServerErrorResponse represents an erroneous API response.
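As a quick round-trip illustration of the varint framing implemented by the helpers above, the sketch below encodes a folder-list request with BinWriter and reads it back with BinReader. The path and depth values are made up for the example:

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/backend/mailru/api"
)

func main() {
	// Encode a frame the way listBin does: opcode, path, depth, options, filler.
	w := api.NewBinWriter()
	w.WritePu16(api.OperationFolderList)
	w.WriteString("/backups") // illustrative path
	w.WritePu32(1)            // depth
	w.WritePu32(api.ListOptDefaults)
	w.WritePu32(0)

	// Decode the same frame to verify the varint round trip.
	r := api.NewBinReader(w.Reader())
	fmt.Println(r.ReadPu32())   // 117 (OperationFolderList)
	fmt.Println(r.ReadString()) // /backups
	fmt.Println(r.ReadPu32())   // 1 (depth)
	fmt.Println(r.ReadPu32())   // list options bit mask
	fmt.Println(r.ReadPu32())   // 0
	fmt.Println(r.Error())      // <nil>
}
```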
+type ServerErrorResponse struct { + Message string `json:"body"` + Time int64 `json:"time"` + Status int `json:"status"` +} + +func (e *ServerErrorResponse) Error() string { + return fmt.Sprintf("server error %d (%s)", e.Status, e.Message) +} + +// FileErrorResponse represents erroneous API response for a file +type FileErrorResponse struct { + Body struct { + Home struct { + Value string `json:"value"` + Error string `json:"error"` + } `json:"home"` + } `json:"body"` + Status int `json:"status"` + Account string `json:"email,omitempty"` + Time int64 `json:"time,omitempty"` + Message string // non-json, calculated field +} + +func (e *FileErrorResponse) Error() string { + return fmt.Sprintf("file error %d (%s)", e.Status, e.Body.Home.Error) +} + +// UserInfoResponse contains account metadata +type UserInfoResponse struct { + Body struct { + AccountType string `json:"account_type"` + AccountVerified bool `json:"account_verified"` + Cloud struct { + Beta struct { + Allowed bool `json:"allowed"` + Asked bool `json:"asked"` + } `json:"beta"` + Billing struct { + ActiveCostID string `json:"active_cost_id"` + ActiveRateID string `json:"active_rate_id"` + AutoProlong bool `json:"auto_prolong"` + Basequota int64 `json:"basequota"` + Enabled bool `json:"enabled"` + Expires int `json:"expires"` + Prolong bool `json:"prolong"` + Promocodes struct { + } `json:"promocodes"` + Subscription []interface{} `json:"subscription"` + Version string `json:"version"` + } `json:"billing"` + Bonuses struct { + CameraUpload bool `json:"camera_upload"` + Complete bool `json:"complete"` + Desktop bool `json:"desktop"` + Feedback bool `json:"feedback"` + Links bool `json:"links"` + Mobile bool `json:"mobile"` + Registration bool `json:"registration"` + } `json:"bonuses"` + Enable struct { + Sharing bool `json:"sharing"` + } `json:"enable"` + FileSizeLimit int64 `json:"file_size_limit"` + Space struct { + BytesTotal int64 `json:"bytes_total"` + BytesUsed int `json:"bytes_used"` + Overquota bool `json:"overquota"` + } `json:"space"` + } `json:"cloud"` + Cloudflags struct { + Exists bool `json:"exists"` + } `json:"cloudflags"` + Domain string `json:"domain"` + Login string `json:"login"` + Newbie bool `json:"newbie"` + UI struct { + ExpandLoader bool `json:"expand_loader"` + Kind string `json:"kind"` + Sidebar bool `json:"sidebar"` + Sort struct { + Order string `json:"order"` + Type string `json:"type"` + } `json:"sort"` + Thumbs bool `json:"thumbs"` + } `json:"ui"` + } `json:"body"` + Email string `json:"email"` + Status int `json:"status"` + Time int64 `json:"time"` +} + +// ListItem ... +type ListItem struct { + Count struct { + Folders int `json:"folders"` + Files int `json:"files"` + } `json:"count,omitempty"` + Kind string `json:"kind"` + Type string `json:"type"` + Name string `json:"name"` + Home string `json:"home"` + Size int64 `json:"size"` + Mtime int64 `json:"mtime,omitempty"` + Hash string `json:"hash,omitempty"` + VirusScan string `json:"virus_scan,omitempty"` + Tree string `json:"tree,omitempty"` + Grev int `json:"grev,omitempty"` + Rev int `json:"rev,omitempty"` +} + +// ItemInfoResponse ... +type ItemInfoResponse struct { + Email string `json:"email"` + Body ListItem `json:"body"` + Time int64 `json:"time"` + Status int `json:"status"` +} + +// FolderInfoResponse ... 
+type FolderInfoResponse struct { + Body struct { + Count struct { + Folders int `json:"folders"` + Files int `json:"files"` + } `json:"count"` + Tree string `json:"tree"` + Name string `json:"name"` + Grev int `json:"grev"` + Size int64 `json:"size"` + Sort struct { + Order string `json:"order"` + Type string `json:"type"` + } `json:"sort"` + Kind string `json:"kind"` + Rev int `json:"rev"` + Type string `json:"type"` + Home string `json:"home"` + List []ListItem `json:"list"` + } `json:"body,omitempty"` + Time int64 `json:"time"` + Status int `json:"status"` + Email string `json:"email"` +} + +// ShardInfoResponse ... +type ShardInfoResponse struct { + Email string `json:"email"` + Body struct { + Video []struct { + Count string `json:"count"` + URL string `json:"url"` + } `json:"video"` + ViewDirect []struct { + Count string `json:"count"` + URL string `json:"url"` + } `json:"view_direct"` + WeblinkView []struct { + Count string `json:"count"` + URL string `json:"url"` + } `json:"weblink_view"` + WeblinkVideo []struct { + Count string `json:"count"` + URL string `json:"url"` + } `json:"weblink_video"` + WeblinkGet []struct { + Count int `json:"count"` + URL string `json:"url"` + } `json:"weblink_get"` + Stock []struct { + Count string `json:"count"` + URL string `json:"url"` + } `json:"stock"` + WeblinkThumbnails []struct { + Count string `json:"count"` + URL string `json:"url"` + } `json:"weblink_thumbnails"` + PublicUpload []struct { + Count string `json:"count"` + URL string `json:"url"` + } `json:"public_upload"` + Auth []struct { + Count string `json:"count"` + URL string `json:"url"` + } `json:"auth"` + Web []struct { + Count string `json:"count"` + URL string `json:"url"` + } `json:"web"` + View []struct { + Count string `json:"count"` + URL string `json:"url"` + } `json:"view"` + Upload []struct { + Count string `json:"count"` + URL string `json:"url"` + } `json:"upload"` + Get []struct { + Count string `json:"count"` + URL string `json:"url"` + } `json:"get"` + Thumbnails []struct { + Count string `json:"count"` + URL string `json:"url"` + } `json:"thumbnails"` + } `json:"body"` + Time int64 `json:"time"` + Status int `json:"status"` +} + +// CleanupResponse ... +type CleanupResponse struct { + Email string `json:"email"` + Time int64 `json:"time"` + StatusStr string `json:"status"` +} + +// GenericResponse ... +type GenericResponse struct { + Email string `json:"email"` + Time int64 `json:"time"` + Status int `json:"status"` + // ignore other fields +} + +// GenericBodyResponse ... 
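To make the JSON error plumbing concrete, here is a minimal sketch that decodes an illustrative (not captured) /api/m1 error payload into FileErrorResponse, mirroring what errorHandler in mailru.go does with a real 404 body:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/rclone/rclone/backend/mailru/api"
)

func main() {
	// Hypothetical body returned for a missing path.
	payload := `{"email":"user@mail.ru","body":{"home":{"value":"/nosuch","error":"not_exists"}},"status":404,"time":1568000000}`
	e := &api.FileErrorResponse{}
	if err := json.Unmarshal([]byte(payload), e); err != nil {
		panic(err)
	}
	e.Message = e.Body.Home.Error // calculated field, as errorHandler does
	fmt.Println(e.Error())        // file error 404 (not_exists)
}
```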
+type GenericBodyResponse struct { + Email string `json:"email"` + Body string `json:"body"` + Time int64 `json:"time"` + Status int `json:"status"` +} diff --git a/backend/mailru/mailru.go b/backend/mailru/mailru.go new file mode 100644 index 000000000..594ab952a --- /dev/null +++ b/backend/mailru/mailru.go @@ -0,0 +1,2378 @@ +package mailru + +import ( + "bytes" + "context" + "fmt" + gohash "hash" + "io" + "path" + "path/filepath" + "sort" + "strconv" + "strings" + "sync" + "time" + + "encoding/hex" + "encoding/json" + "io/ioutil" + "net/http" + "net/url" + + "github.com/rclone/rclone/backend/mailru/api" + "github.com/rclone/rclone/backend/mailru/mrhash" + + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/config/configmap" + "github.com/rclone/rclone/fs/config/configstruct" + "github.com/rclone/rclone/fs/config/obscure" + "github.com/rclone/rclone/fs/fserrors" + "github.com/rclone/rclone/fs/fshttp" + "github.com/rclone/rclone/fs/hash" + "github.com/rclone/rclone/fs/object" + "github.com/rclone/rclone/fs/operations" + + "github.com/rclone/rclone/lib/oauthutil" + "github.com/rclone/rclone/lib/pacer" + "github.com/rclone/rclone/lib/rest" + + "github.com/pkg/errors" + "golang.org/x/oauth2" +) + +// Global constants +const ( + minSleepPacer = 10 * time.Millisecond + maxSleepPacer = 2 * time.Second + decayConstPacer = 2 // bigger for slower decay, exponential + metaExpirySec = 20 * 60 // meta server expiration time + serverExpirySec = 3 * 60 // download server expiration time + shardExpirySec = 30 * 60 // upload server expiration time + maxServerLocks = 4 // maximum number of locks per single download server + maxInt32 = 2147483647 // used as limit in directory list request + speedupMinSize = 512 // speedup is not optimal if data is smaller than average packet +) + +// Global errors +var ( + ErrorDirAlreadyExists = errors.New("directory already exists") + ErrorDirSourceNotExists = errors.New("directory source does not exist") + ErrorInvalidName = errors.New("invalid characters in object name") +) + +// Description of how to authorize +var oauthConfig = &oauth2.Config{ + ClientID: api.OAuthClientID, + ClientSecret: "", + Endpoint: oauth2.Endpoint{ + AuthURL: api.OAuthURL, + TokenURL: api.OAuthURL, + AuthStyle: oauth2.AuthStyleInParams, + }, +} + +// Register with Fs +func init() { + fs.Register(&fs.RegInfo{ + Name: "mailru", + Description: "Mail.ru Cloud", + NewFs: NewFs, + Options: []fs.Option{{ + Name: "user", + Help: "User name (usually email)", + Required: true, + }, { + Name: "pass", + Help: "Password", + Required: true, + IsPassword: true, + }, { + Name: "speedup_enable", + Default: true, + Advanced: false, + Help: `Skip full upload if there is another file with same data hash. +This feature is called "speedup" or "put by hash". It is especially efficient +in case of generally available files like popular books, video or audio clips, +because files are searched by hash in all accounts of all mailru users. +Please note that rclone may need local memory and disk space to calculate +content hash in advance and decide whether full upload is required. +Also, if rclone does not know file size in advance (e.g. 
in case of
+streaming or partial uploads), it will not even try this optimization.`,
+			Examples: []fs.OptionExample{{
+				Value: "true",
+				Help:  "Enable",
+			}, {
+				Value: "false",
+				Help:  "Disable",
+			}},
+		}, {
+			Name:     "speedup_file_patterns",
+			Default:  "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf",
+			Advanced: true,
+			Help: `Comma separated list of file name patterns eligible for speedup (put by hash).
+Patterns are case insensitive and can contain '*' or '?' meta characters.`,
+			Examples: []fs.OptionExample{{
+				Value: "",
+				Help:  "Empty list completely disables speedup (put by hash).",
+			}, {
+				Value: "*",
+				Help:  "All files will be attempted for speedup.",
+			}, {
+				Value: "*.mkv,*.avi,*.mp4,*.mp3",
+				Help:  "Only common audio/video files will be tried for put by hash.",
+			}, {
+				Value: "*.zip,*.gz,*.rar,*.pdf",
+				Help:  "Only common archives or PDF books will be tried for speedup.",
+			}},
+		}, {
+			Name:     "speedup_max_disk",
+			Default:  fs.SizeSuffix(3 * 1024 * 1024 * 1024),
+			Advanced: true,
+			Help: `This option allows you to disable speedup (put by hash) for large files
+(because preliminary hashing can exhaust your RAM or disk space)`,
+			Examples: []fs.OptionExample{{
+				Value: "0",
+				Help:  "Completely disable speedup (put by hash).",
+			}, {
+				Value: "1G",
+				Help:  "Files larger than 1 GB will be uploaded directly.",
+			}, {
+				Value: "3G",
+				Help:  "Choose this option if you have less than 3 GB free on local disk.",
+			}},
+		}, {
+			Name:     "speedup_max_memory",
+			Default:  fs.SizeSuffix(32 * 1024 * 1024),
+			Advanced: true,
+			Help:     `Files larger than the size given here will always be hashed on disk.`,
+			Examples: []fs.OptionExample{{
+				Value: "0",
+				Help:  "Preliminary hashing will always be done in a temporary disk location.",
+			}, {
+				Value: "32M",
+				Help:  "Do not dedicate more than 32 MB of RAM for preliminary hashing.",
+			}, {
+				Value: "256M",
+				Help:  "You have at most 256 MB of RAM free for hash calculations.",
+			}},
+		}, {
+			Name:     "check_hash",
+			Default:  true,
+			Advanced: true,
+			Help:     "What to do if a file checksum is mismatched or invalid",
+			Examples: []fs.OptionExample{{
+				Value: "true",
+				Help:  "Fail with error.",
+			}, {
+				Value: "false",
+				Help:  "Ignore and continue.",
+			}},
+		}, {
+			Name:     "user_agent",
+			Default:  "",
+			Advanced: true,
+			Hide:     fs.OptionHideBoth,
+			Help: `HTTP user agent used internally by client.
+Defaults to "rclone/VERSION" or "--user-agent" provided on command line.`,
+		}, {
+			Name:     "quirks",
+			Default:  "",
+			Advanced: true,
+			Hide:     fs.OptionHideBoth,
+			Help: `Comma separated list of internal maintenance flags.
+This option must not be used by an ordinary user. It is intended only to
+facilitate remote troubleshooting of backend issues. The strict meaning of
+flags is not documented and not guaranteed to persist between releases.
+Quirks will be removed when the backend grows stable.
+Supported quirks: atomicmkdir binlist gzip insecure retry400`,
+		}},
+	})
+}
+
+// Options defines the configuration for this backend
+type Options struct {
+	Username        string        `config:"user"`
+	Password        string        `config:"pass"`
+	UserAgent       string        `config:"user_agent"`
+	CheckHash       bool          `config:"check_hash"`
+	SpeedupEnable   bool          `config:"speedup_enable"`
+	SpeedupPatterns string        `config:"speedup_file_patterns"`
+	SpeedupMaxDisk  fs.SizeSuffix `config:"speedup_max_disk"`
+	SpeedupMaxMem   fs.SizeSuffix `config:"speedup_max_memory"`
+	Quirks          string        `config:"quirks"`
+}
+
+// retryErrorCodes is a slice of error codes that we will retry
+var retryErrorCodes = []int{
+	429, // Too Many Requests
+	500, // Internal Server Error
+	502, // Bad Gateway
+	503, // Service Unavailable
+	504, // Gateway Timeout
+	509, // Bandwidth Limit Exceeded
+}
+
+// shouldRetry returns a boolean as to whether this response and err
+// deserve to be retried. It returns the err as a convenience.
+// Retries password authorization (once) in the special case of access denied.
+func shouldRetry(res *http.Response, err error, f *Fs, opts *rest.Opts) (bool, error) {
+	// res can be nil if the request failed at the transport level
+	if res != nil && res.StatusCode == 403 && f.opt.Password != "" && !f.passFailed {
+		reAuthErr := f.reAuthorize(opts, err)
+		return reAuthErr == nil, err // return the original error
+	}
+	if res != nil && f.quirks.retry400 && res.StatusCode == 400 {
+		return true, err
+	}
+	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(res, retryErrorCodes), err
+}
+
+// errorHandler parses a non 2xx error response into an error
+func errorHandler(res *http.Response) (err error) {
+	data, err := rest.ReadBody(res)
+	if err != nil {
+		return err
+	}
+	fileError := &api.FileErrorResponse{}
+	err = json.NewDecoder(bytes.NewReader(data)).Decode(fileError)
+	if err == nil {
+		fileError.Message = fileError.Body.Home.Error
+		return fileError
+	}
+	serverError := &api.ServerErrorResponse{}
+	err = json.NewDecoder(bytes.NewReader(data)).Decode(serverError)
+	if err == nil {
+		return serverError
+	}
+	serverError.Message = string(data)
+	if serverError.Message == "" || strings.HasPrefix(serverError.Message, "{") {
+		// Replace an empty or JSON response with human readable text.
+		serverError.Message = res.Status
+	}
+	serverError.Status = res.StatusCode
+	return serverError
+}
+
+// Fs represents a remote mail.ru
+type Fs struct {
+	name         string
+	root         string             // root path
+	opt          Options            // parsed options
+	speedupGlobs []string           // list of file name patterns eligible for speedup
+	speedupAny   bool               // true if all file names are eligible for speedup
+	features     *fs.Features       // optional features
+	srv          *rest.Client       // REST API client
+	cli          *http.Client       // underlying HTTP client (for authorize)
+	m            configmap.Mapper   // config reader (for authorize)
+	source       oauth2.TokenSource // OAuth token refresher
+	pacer        *fs.Pacer          // pacer for API calls
+	metaMu       sync.Mutex         // lock for meta server switcher
+	metaURL      string             // URL of meta server
+	metaExpiry   time.Time          // time to refresh meta server
+	shardMu      sync.Mutex         // lock for upload shard switcher
+	shardURL     string             // URL of upload shard
+	shardExpiry  time.Time          // time to refresh upload shard
+	fileServers  serverPool         // file server dispatcher
+	authMu       sync.Mutex         // mutex for authorize()
+	passFailed   bool               // true if authorize() failed after 403
+	quirks       quirks             // internal maintenance flags
+}
+
+// NewFs constructs an Fs from the path, container:path
+func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+	fs.Debugf(nil, ">>> NewFs %q %q", name, root)
+	ctx := context.Background() // Note: NewFs does not pass a context!
+
+	// Parse config into Options struct
+	opt := new(Options)
+	err := configstruct.Set(m, opt)
+	if err != nil {
+		return nil, err
+	}
+	if opt.Password != "" {
+		opt.Password = obscure.MustReveal(opt.Password)
+	}
+
+	// Trailing slash signals us to optimize out one file check
+	rootIsDir := strings.HasSuffix(root, "/")
+	// However the f.root string should not have leading or trailing slashes
+	root = strings.Trim(root, "/")
+
+	f := &Fs{
+		name: name,
+		root: root,
+		opt:  *opt,
+		m:    m,
+	}
+
+	if err := f.parseSpeedupPatterns(opt.SpeedupPatterns); err != nil {
+		return nil, err
+	}
+	f.quirks.parseQuirks(opt.Quirks)
+
+	f.pacer = fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleepPacer), pacer.MaxSleep(maxSleepPacer), pacer.DecayConstant(decayConstPacer)))
+
+	f.features = (&fs.Features{
+		CaseInsensitive:         true,
+		CanHaveEmptyDirectories: true,
+		// Can copy/move across mailru configs (almost, thus true here), but
+		// only when they share a common account (this is checked in Copy/Move).
+		ServerSideAcrossConfigs: true,
+	}).Fill(f)
+
+	// Override a few config settings and create a client
+	clientConfig := *fs.Config
+	if opt.UserAgent != "" {
+		clientConfig.UserAgent = opt.UserAgent
+	}
+	clientConfig.NoGzip = !f.quirks.gzip // Do not send "Accept-Encoding: gzip" like the official client
+	f.cli = fshttp.NewClient(&clientConfig)
+
+	f.srv = rest.NewClient(f.cli)
+	f.srv.SetRoot(api.APIServerURL)
+	f.srv.SetHeader("Accept", "*/*") // Send "Accept: */*" with every request like official client
+	f.srv.SetErrorHandler(errorHandler)
+
+	if f.quirks.insecure {
+		transport := f.cli.Transport.(*fshttp.Transport).Transport
+		transport.TLSClientConfig.InsecureSkipVerify = true
+		transport.ProxyConnectHeader = http.Header{"User-Agent": {clientConfig.UserAgent}}
+	}
+
+	if err = f.authorize(ctx, false); err != nil {
+		return nil, err
+	}
+
+	f.fileServers = serverPool{
+		pool:      make(pendingServerMap),
+		fs:        f,
+		path:      "/d",
+		expirySec: serverExpirySec,
+	}
+
+	if !rootIsDir {
+		_, dirSize, err := f.readItemMetaData(ctx, f.root)
+		rootIsDir = (dirSize >= 0)
+		// Ignore non-existing item and other errors
+		if err == nil && !rootIsDir {
+			root = path.Dir(f.root)
+			if root == "." {
+				root = ""
+			}
+			f.root = root
+			// Return fs that points to the parent and signal rclone to do filtering
+			return f, fs.ErrorIsFile
+		}
+	}
+
+	return f, nil
+}
+
+// Internal maintenance flags (to be removed when the backend matures).
+// Primarily intended to facilitate remote support and troubleshooting.
+type quirks struct {
+	gzip        bool
+	insecure    bool
+	binlist     bool
+	atomicmkdir bool
+	retry400    bool
+}
+
+func (q *quirks) parseQuirks(option string) {
+	for _, flag := range strings.Split(option, ",") {
+		switch strings.ToLower(strings.TrimSpace(flag)) {
+		case "gzip":
+			// This backend mimics the official client which never sends the
+			// "Accept-Encoding: gzip" header. However, enabling compression
+			// might be good for performance.
+			// Use this quirk to investigate the performance impact.
+			// Remove this quirk if performance does not improve.
+			q.gzip = true
+		case "insecure":
+			// The mailru disk-o protocol is not documented. To compare HTTP
+			// stream against the official client one can use Telerik Fiddler,
+			// which introduces a self-signed certificate. This quirk forces
+			// the Go http layer to accept it.
+			// Remove this quirk when the backend reaches maturity.
+ q.insecure = true + case "binlist": + // The official client sometimes uses a so called "bin" protocol, + // implemented in the listBin file system method below. This method + // is generally faster than non-recursive listM1 but results in + // sporadic deserialization failures if total size of tree data + // approaches 8Kb (?). The recursive method is normally disabled. + // This quirk can be used to enable it for further investigation. + // Remove this quirk when the "bin" protocol support is complete. + q.binlist = true + case "atomicmkdir": + // At the moment rclone requires Mkdir to return success if the + // directory already exists. However, such programs as borgbackup + // or restic use mkdir as a locking primitive and depend on its + // atomicity. This quirk is a workaround. It can be removed + // when the above issue is investigated. + q.atomicmkdir = true + case "retry400": + // This quirk will help in troubleshooting a very rare "Error 400" + // issue. It can be removed if the problem does not show up + // for a year or so. See the below issue: + // https://github.com/ivandeex/rclone/issues/14 + q.retry400 = true + default: + // Just ignore all unknown flags + } + } +} + +// Note: authorize() is not safe for concurrent access as it updates token source +func (f *Fs) authorize(ctx context.Context, force bool) (err error) { + var t *oauth2.Token + if !force { + t, err = oauthutil.GetToken(f.name, f.m) + } + + if err != nil || !tokenIsValid(t) { + fs.Infof(f, "Valid token not found, authorizing.") + ctx := oauthutil.Context(f.cli) + t, err = oauthConfig.PasswordCredentialsToken(ctx, f.opt.Username, f.opt.Password) + } + if err == nil && !tokenIsValid(t) { + err = errors.New("Invalid token") + } + if err != nil { + return errors.Wrap(err, "Failed to authorize") + } + + if err = oauthutil.PutToken(f.name, f.m, t, false); err != nil { + return err + } + + // Mailru API server expects access token not in the request header but + // in the URL query string, so we must use a bare token source rather than + // client provided by oauthutil. + // + // WARNING: direct use of the returned token source triggers a bug in the + // `(*token != *ts.token)` comparison in oauthutil.TokenSource.Token() + // crashing with panic `comparing uncomparable type map[string]interface{}` + // As a workaround, mimic oauth2.NewClient() wrapping token source in + // oauth2.ReuseTokenSource + _, ts, err := oauthutil.NewClientWithBaseClient(f.name, f.m, oauthConfig, f.cli) + if err == nil { + f.source = oauth2.ReuseTokenSource(nil, ts) + } + return err +} + +func tokenIsValid(t *oauth2.Token) bool { + return t.Valid() && t.RefreshToken != "" && t.Type() == "Bearer" +} + +// reAuthorize is called after getting 403 (access denied) from the server. +// It handles the case when user has changed password since a previous +// rclone invocation and obtains a new access token, if needed. +func (f *Fs) reAuthorize(opts *rest.Opts, origErr error) error { + // lock and recheck the flag to ensure authorize() is attempted only once + f.authMu.Lock() + defer f.authMu.Unlock() + if f.passFailed { + return origErr + } + ctx := context.Background() // Note: reAuthorize is called by ShouldRetry, no context! 
+ + fs.Debugf(f, "re-authorize with new password") + if err := f.authorize(ctx, true); err != nil { + f.passFailed = true + return err + } + + // obtain new token, if needed + tokenParameter := "" + if opts != nil && opts.Parameters.Get("token") != "" { + tokenParameter = "token" + } + if opts != nil && opts.Parameters.Get("access_token") != "" { + tokenParameter = "access_token" + } + if tokenParameter != "" { + token, err := f.accessToken() + if err != nil { + f.passFailed = true + return err + } + opts.Parameters.Set(tokenParameter, token) + } + + return nil +} + +// accessToken() returns OAuth token and possibly refreshes it +func (f *Fs) accessToken() (string, error) { + token, err := f.source.Token() + if err != nil { + return "", errors.Wrap(err, "cannot refresh access token") + } + return token.AccessToken, nil +} + +// absPath converts root-relative remote to absolute home path +func (f *Fs) absPath(remote string) string { + return "/" + path.Join(f.root, strings.Trim(remote, "/")) +} + +// relPath converts absolute home path to root-relative remote +// Note that f.root can not have leading and trailing slashes +func (f *Fs) relPath(absPath string) (string, error) { + target := strings.Trim(absPath, "/") + if f.root == "" { + return target, nil + } + if target == f.root { + return "", nil + } + if strings.HasPrefix(target+"/", f.root+"/") { + return target[len(f.root)+1:], nil + } + return "", fmt.Errorf("path %q should be under %q", absPath, f.root) +} + +// metaServer ... +func (f *Fs) metaServer() (string, error) { + f.metaMu.Lock() + defer f.metaMu.Unlock() + + if f.metaURL != "" && time.Now().Before(f.metaExpiry) { + return f.metaURL, nil + } + + opts := rest.Opts{ + RootURL: api.DispatchServerURL, + Method: "GET", + Path: "/m", + } + + var ( + res *http.Response + url string + err error + ) + err = f.pacer.Call(func() (bool, error) { + res, err = f.srv.Call(&opts) + if err == nil { + url, err = readBodyWord(res) + } + return fserrors.ShouldRetry(err), err + }) + if err != nil { + closeBody(res) + return "", err + } + f.metaURL = url + f.metaExpiry = time.Now().Add(metaExpirySec * time.Second) + fs.Debugf(f, "new meta server: %s", f.metaURL) + return f.metaURL, nil +} + +// readBodyWord reads the single line response to completion +// and extracts the first word from the first line. 
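metaServer above follows a pattern this backend uses for dispatched endpoints: a mutex-guarded URL cache with a time-based expiry. A reduced, self-contained sketch of that pattern (the type and fetch function are illustrative, not part of the backend):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// endpointCache caches a server URL for a fixed TTL and refetches on expiry.
type endpointCache struct {
	mu     sync.Mutex
	url    string
	expiry time.Time
	ttl    time.Duration
	fetch  func() (string, error) // e.g. a dispatcher round trip
}

func (c *endpointCache) get() (string, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.url != "" && time.Now().Before(c.expiry) {
		return c.url, nil // still fresh, avoid another dispatcher call
	}
	url, err := c.fetch()
	if err != nil {
		return "", err
	}
	c.url = url
	c.expiry = time.Now().Add(c.ttl)
	return c.url, nil
}

func main() {
	c := &endpointCache{
		ttl:   20 * time.Minute, // mirrors metaExpirySec
		fetch: func() (string, error) { return "https://meta.example.invalid/m", nil },
	}
	fmt.Println(c.get())
}
```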
+func readBodyWord(res *http.Response) (word string, err error) {
+	var body []byte
+	body, err = rest.ReadBody(res)
+	if err == nil {
+		line := strings.Trim(string(body), " \r\n")
+		word = strings.Split(line, " ")[0]
+	}
+	if word == "" {
+		return "", errors.New("Empty reply from dispatcher")
+	}
+	return word, nil
+}
+
+// readItemMetaData returns a file/directory info at given full path
+// If it can't be found it fails with fs.ErrorObjectNotFound
+// For the return value `dirSize` please see Fs.itemToDirEntry()
+func (f *Fs) readItemMetaData(ctx context.Context, path string) (entry fs.DirEntry, dirSize int, err error) {
+	token, err := f.accessToken()
+	if err != nil {
+		return nil, -1, err
+	}
+
+	opts := rest.Opts{
+		Method: "GET",
+		Path:   "/api/m1/file",
+		Parameters: url.Values{
+			"access_token": {token},
+			"home":         {path},
+			"offset":       {"0"},
+			"limit":        {strconv.Itoa(maxInt32)},
+		},
+	}
+
+	var info api.ItemInfoResponse
+	err = f.pacer.Call(func() (bool, error) {
+		res, err := f.srv.CallJSON(&opts, nil, &info)
+		return shouldRetry(res, err, f, &opts)
+	})
+
+	if err != nil {
+		if apiErr, ok := err.(*api.FileErrorResponse); ok {
+			switch apiErr.Status {
+			case 404:
+				err = fs.ErrorObjectNotFound
+			case 400:
+				fs.Debugf(f, "object %q status %d (%s)", path, apiErr.Status, apiErr.Message)
+				err = fs.ErrorObjectNotFound
+			}
+		}
+		return
+	}
+
+	entry, dirSize, err = f.itemToDirEntry(ctx, &info.Body)
+	return
+}
+
+// itemToDirEntry converts an API item to an rclone directory entry
+// The dirSize return value is:
+//   <0 - for a file or in case of error
+//   =0 - for an empty directory
+//   >0 - for a non-empty directory
+func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.DirEntry, dirSize int, err error) {
+	remote, err := f.relPath(item.Home)
+	if err != nil {
+		return nil, -1, err
+	}
+	switch item.Kind {
+	case "folder":
+		dir := fs.NewDir(remote, time.Unix(item.Mtime, 0)).SetSize(item.Size)
+		dirSize := item.Count.Files + item.Count.Folders
+		return dir, dirSize, nil
+	case "file":
+		binHash, err := mrhash.DecodeString(item.Hash)
+		if err != nil {
+			return nil, -1, err
+		}
+		file := &Object{
+			fs:          f,
+			remote:      remote,
+			hasMetaData: true,
+			size:        item.Size,
+			mrHash:      binHash,
+			modTime:     time.Unix(item.Mtime, 0),
+		}
+		return file, -1, nil
+	default:
+		return nil, -1, fmt.Errorf("unknown resource type %q", item.Kind)
+	}
+}
+
+// List the objects and directories in dir into entries.
+// The entries can be returned in any order but should be for a complete directory.
+// dir should be "" to list the root, and should not have trailing slashes.
+// This should return ErrDirNotFound if the directory isn't found.
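List below defers to listM1, which is a plain form-encoded POST against the m1 API. For orientation, this standalone sketch performs the same request outside rclone (the token and path are placeholders; a real call needs a valid OAuth token):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
	"strings"
)

func main() {
	params := url.Values{
		"access_token": {"PLACEHOLDER_TOKEN"}, // hypothetical token
		"offset":       {"0"},
		"limit":        {"100"},
	}
	form := url.Values{"home": {"/backups"}} // directory to list
	res, err := http.Post(
		"https://cloud.mail.ru/api/m1/folder?"+params.Encode(),
		"application/x-www-form-urlencoded",
		strings.NewReader(form.Encode()),
	)
	if err != nil {
		panic(err)
	}
	defer func() { _ = res.Body.Close() }()
	body, _ := ioutil.ReadAll(res.Body)
	fmt.Println(string(body)) // JSON shaped like api.FolderInfoResponse
}
```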
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { + fs.Debugf(f, ">>> List: %q", dir) + + if f.quirks.binlist { + entries, err = f.listBin(ctx, f.absPath(dir), 1) + } else { + entries, err = f.listM1(ctx, f.absPath(dir), 0, maxInt32) + } + + if err == nil && fs.Config.LogLevel >= fs.LogLevelDebug { + names := []string{} + for _, entry := range entries { + names = append(names, entry.Remote()) + } + sort.Strings(names) + fs.Debugf(f, "List(%q): %v", dir, names) + } + + return +} + +// list using protocol "m1" +func (f *Fs) listM1(ctx context.Context, dirPath string, offset int, limit int) (entries fs.DirEntries, err error) { + token, err := f.accessToken() + if err != nil { + return nil, err + } + + params := url.Values{} + params.Set("access_token", token) + params.Set("offset", strconv.Itoa(offset)) + params.Set("limit", strconv.Itoa(limit)) + + data := url.Values{} + data.Set("home", dirPath) + + opts := rest.Opts{ + Method: "POST", + Path: "/api/m1/folder", + Parameters: params, + Body: strings.NewReader(data.Encode()), + ContentType: api.BinContentType, + } + + var ( + info api.FolderInfoResponse + res *http.Response + ) + err = f.pacer.Call(func() (bool, error) { + res, err = f.srv.CallJSON(&opts, nil, &info) + return shouldRetry(res, err, f, &opts) + }) + + if err != nil { + apiErr, ok := err.(*api.FileErrorResponse) + if ok && apiErr.Status == 404 { + return nil, fs.ErrorDirNotFound + } + return nil, err + } + + if info.Body.Kind != "folder" { + return nil, fs.ErrorIsFile + } + + for _, item := range info.Body.List { + entry, _, err := f.itemToDirEntry(ctx, &item) + if err == nil { + entries = append(entries, entry) + } else { + fs.Debugf(f, "Excluding path %q from list: %v", item.Home, err) + } + } + return entries, nil +} + +// list using protocol "bin" +func (f *Fs) listBin(ctx context.Context, dirPath string, depth int) (entries fs.DirEntries, err error) { + options := api.ListOptDefaults + + req := api.NewBinWriter() + req.WritePu16(api.OperationFolderList) + req.WriteString(dirPath) + req.WritePu32(int64(depth)) + req.WritePu32(int64(options)) + req.WritePu32(0) + + token, err := f.accessToken() + if err != nil { + return nil, err + } + metaURL, err := f.metaServer() + if err != nil { + return nil, err + } + + opts := rest.Opts{ + Method: "POST", + RootURL: metaURL, + Parameters: url.Values{ + "client_id": {api.OAuthClientID}, + "token": {token}, + }, + ContentType: api.BinContentType, + Body: req.Reader(), + } + + var res *http.Response + err = f.pacer.Call(func() (bool, error) { + res, err = f.srv.Call(&opts) + return shouldRetry(res, err, f, &opts) + }) + if err != nil { + closeBody(res) + return nil, err + } + + r := api.NewBinReader(res.Body) + defer closeBody(res) + + // read status + switch status := r.ReadByteAsInt(); status { + case api.ListResultOK: + // go on... 
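+		// A successful reply continues with, in this order:
+		//   - revision record (version byte plus tree IDs, see treeRevision.Read)
+		//   - total and used space as varints (only if requested in options)
+		//   - length-prefixed fingerprint blob
+		//   - stream of parse operations terminated by ListParseDone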
+	case api.ListResultNotExists:
+		return nil, fs.ErrorDirNotFound
+	default:
+		return nil, fmt.Errorf("directory list error %d", status)
+	}
+
+	t := &treeState{
+		f:       f,
+		r:       r,
+		options: options,
+		rootDir: parentDir(dirPath),
+		lastDir: "",
+		level:   0,
+	}
+	t.currDir = t.rootDir
+
+	// read revision
+	if err := t.revision.Read(r); err != nil {
+		return nil, err
+	}
+
+	// read space
+	if (options & api.ListOptTotalSpace) != 0 {
+		t.totalSpace = int64(r.ReadULong())
+	}
+	if (options & api.ListOptUsedSpace) != 0 {
+		t.usedSpace = int64(r.ReadULong())
+	}
+
+	t.fingerprint = r.ReadBytesByLength()
+
+	// deserialize
+	for {
+		// Note: assign to the outer err so the check after the loop sees
+		// parse failures (a `:=` here would shadow it and swallow errors).
+		var entry fs.DirEntry
+		entry, err = t.NextRecord()
+		if err != nil {
+			break
+		}
+		if entry != nil {
+			entries = append(entries, entry)
+		}
+	}
+	if err != nil && err != fs.ErrorListAborted {
+		fs.Debugf(f, "listBin failed at offset %d: %v", r.Count(), err)
+		return nil, err
+	}
+	return entries, nil
+}
+
+func (t *treeState) NextRecord() (fs.DirEntry, error) {
+	r := t.r
+	parseOp := r.ReadByteAsShort()
+	if r.Error() != nil {
+		return nil, r.Error()
+	}
+
+	switch parseOp {
+	case api.ListParseDone:
+		return nil, fs.ErrorListAborted
+	case api.ListParsePin:
+		if t.lastDir == "" {
+			return nil, errors.New("last folder is null")
+		}
+		t.currDir = t.lastDir
+		t.level++
+		return nil, nil
+	case api.ListParsePinUpper:
+		if t.currDir == t.rootDir {
+			return nil, nil
+		}
+		if t.level <= 0 {
+			return nil, errors.New("no parent folder")
+		}
+		t.currDir = parentDir(t.currDir)
+		t.level--
+		return nil, nil
+	case api.ListParseUnknown15:
+		skip := int(r.ReadPu32())
+		for i := 0; i < skip; i++ {
+			r.ReadPu32()
+			r.ReadPu32()
+		}
+		return nil, nil
+	case api.ListParseReadItem:
+		// get item (see below)
+	default:
+		return nil, fmt.Errorf("unknown parse operation %d", parseOp)
+	}
+
+	// get item
+	head := r.ReadIntSpl()
+	itemType := head & 3
+	if (head & 4096) != 0 {
+		t.dunnoNodeID = r.ReadNBytes(api.DunnoNodeIDLength)
+	}
+	name := string(r.ReadBytesByLength())
+	t.dunno1 = int(r.ReadULong())
+	t.dunno2 = 0
+	t.dunno3 = 0
+
+	if r.Error() != nil {
+		return nil, r.Error()
+	}
+
+	var (
+		modTime time.Time
+		size    int64
+		binHash []byte
+		dirSize int64
+		isDir   = true
+	)
+
+	switch itemType {
+	case api.ListItemMountPoint:
+		t.treeID = r.ReadNBytes(api.TreeIDLength)
+		t.dunno2 = int(r.ReadULong())
+		t.dunno3 = int(r.ReadULong())
+	case api.ListItemFolder:
+		t.dunno2 = int(r.ReadULong())
+	case api.ListItemSharedFolder:
+		t.dunno2 = int(r.ReadULong())
+		t.treeID = r.ReadNBytes(api.TreeIDLength)
+	case api.ListItemFile:
+		isDir = false
+		modTime = r.ReadDate()
+		size = int64(r.ReadULong())
+		binHash = r.ReadNBytes(mrhash.Size)
+	default:
+		return nil, fmt.Errorf("unknown item type %d", itemType)
+	}
+
+	if isDir {
+		t.lastDir = path.Join(t.currDir, name)
+		if (t.options & api.ListOptDelete) != 0 {
+			t.dunnoDel1 = int(r.ReadPu32())
+			t.dunnoDel2 = int(r.ReadPu32())
+		}
+		if (t.options & api.ListOptFolderSize) != 0 {
+			dirSize = int64(r.ReadULong())
+		}
+	}
+
+	if r.Error() != nil {
+		return nil, r.Error()
+	}
+
+	if fs.Config.LogLevel >= fs.LogLevelDebug {
+		ctime, _ := modTime.MarshalJSON()
+		fs.Debugf(t.f, "binDir %d.%d %q %q (%d) %s", t.level, itemType, t.currDir, name, size, ctime)
+	}
+
+	if t.level != 1 {
+		// TODO: implement recursion and ListR
+		// Note: recursion is broken because the maximum buffer size is 8K
+		return nil, nil
+	}
+
+	remote, err := t.f.relPath(path.Join(t.currDir, name))
+	if err != nil {
+		return nil, err
+	}
+	if isDir {
+		return fs.NewDir(remote, modTime).SetSize(dirSize), nil
+ } + obj := &Object{ + fs: t.f, + remote: remote, + hasMetaData: true, + size: size, + mrHash: binHash, + modTime: modTime, + } + return obj, nil +} + +type treeState struct { + f *Fs + r *api.BinReader + options int + rootDir string + currDir string + lastDir string + level int + revision treeRevision + totalSpace int64 + usedSpace int64 + fingerprint []byte + dunno1 int + dunno2 int + dunno3 int + dunnoDel1 int + dunnoDel2 int + dunnoNodeID []byte + treeID []byte +} + +type treeRevision struct { + ver int16 + treeID []byte + treeIDNew []byte + bgn uint64 + bgnNew uint64 +} + +func (rev *treeRevision) Read(data *api.BinReader) error { + rev.ver = data.ReadByteAsShort() + switch rev.ver { + case 0: + // Revision() + case 1, 2: + rev.treeID = data.ReadNBytes(api.TreeIDLength) + rev.bgn = data.ReadULong() + case 3, 4: + rev.treeID = data.ReadNBytes(api.TreeIDLength) + rev.bgn = data.ReadULong() + rev.treeIDNew = data.ReadNBytes(api.TreeIDLength) + rev.bgnNew = data.ReadULong() + case 5: + rev.treeID = data.ReadNBytes(api.TreeIDLength) + rev.bgn = data.ReadULong() + rev.treeIDNew = data.ReadNBytes(api.TreeIDLength) + default: + return fmt.Errorf("unknown directory revision %d", rev.ver) + } + return data.Error() +} + +// CreateDir makes a directory (parent must exist) +func (f *Fs) CreateDir(ctx context.Context, path string) error { + fs.Debugf(f, ">>> CreateDir %q", path) + + req := api.NewBinWriter() + req.WritePu16(api.OperationCreateFolder) + req.WritePu16(0) // revision + req.WriteString(path) + req.WritePu32(0) + + token, err := f.accessToken() + if err != nil { + return err + } + metaURL, err := f.metaServer() + if err != nil { + return err + } + + opts := rest.Opts{ + Method: "POST", + RootURL: metaURL, + Parameters: url.Values{ + "client_id": {api.OAuthClientID}, + "token": {token}, + }, + ContentType: api.BinContentType, + Body: req.Reader(), + } + + var res *http.Response + err = f.pacer.Call(func() (bool, error) { + res, err = f.srv.Call(&opts) + return shouldRetry(res, err, f, &opts) + }) + if err != nil { + closeBody(res) + return err + } + + reply := api.NewBinReader(res.Body) + defer closeBody(res) + + switch status := reply.ReadByteAsInt(); status { + case api.MkdirResultOK: + return nil + case api.MkdirResultAlreadyExists, api.MkdirResultExistsDifferentCase: + return ErrorDirAlreadyExists + case api.MkdirResultSourceNotExists: + return ErrorDirSourceNotExists + case api.MkdirResultInvalidName: + return ErrorInvalidName + default: + return fmt.Errorf("mkdir error %d", status) + } +} + +// Mkdir creates the container (and its parents) if it doesn't exist. +// Normally it ignores the ErrorDirAlreadyExist, as required by rclone tests. +// Nevertheless, such programs as borgbackup or restic use mkdir as a locking +// primitive and depend on its atomicity, i.e. mkdir should fail if directory +// already exists. As a workaround, users can add string "atomicmkdir" in the +// hidden `quirks` parameter or in the `--mailru-quirks` command-line option. +func (f *Fs) Mkdir(ctx context.Context, dir string) error { + fs.Debugf(f, ">>> Mkdir %q", dir) + err := f.mkDirs(ctx, f.absPath(dir)) + if err == ErrorDirAlreadyExists && !f.quirks.atomicmkdir { + return nil + } + return err +} + +// mkDirs creates container and its parents by absolute path, +// fails with ErrorDirAlreadyExists if it already exists. 
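The parent-walk performed by mkDirs below is simple enough to show in isolation. This sketch prints the sequence of CreateDir calls the real method would fall back to for a nested path:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// For "/a/b/c" the backend first attempts one CreateDir and, on
	// MkdirResultSourceNotExists, creates each ancestor in turn.
	dir := ""
	for _, part := range strings.Split(strings.Trim("/a/b/c", "/"), "/") {
		if part == "" {
			continue
		}
		dir += "/" + part
		fmt.Println("CreateDir", dir) // /a, then /a/b, then /a/b/c
	}
}
```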
+func (f *Fs) mkDirs(ctx context.Context, path string) error {
+	if path == "/" || path == "" {
+		return nil
+	}
+	switch err := f.CreateDir(ctx, path); err {
+	case nil:
+		return nil
+	case ErrorDirSourceNotExists:
+		fs.Debugf(f, "mkDirs by part %q", path)
+		// fall thru...
+	default:
+		return err
+	}
+	parts := strings.Split(strings.Trim(path, "/"), "/")
+	path = ""
+	for _, part := range parts {
+		if part == "" {
+			continue
+		}
+		path += "/" + part
+		switch err := f.CreateDir(ctx, path); err {
+		case nil, ErrorDirAlreadyExists:
+			continue
+		default:
+			return err
+		}
+	}
+	return nil
+}
+
+func parentDir(absPath string) string {
+	parent := path.Dir(strings.TrimRight(absPath, "/"))
+	if parent == "." {
+		parent = ""
+	}
+	return parent
+}
+
+// mkParentDirs creates parent containers by absolute path,
+// ignores the ErrorDirAlreadyExists
+func (f *Fs) mkParentDirs(ctx context.Context, path string) error {
+	err := f.mkDirs(ctx, parentDir(path))
+	if err == ErrorDirAlreadyExists {
+		return nil
+	}
+	return err
+}
+
+// Rmdir deletes a directory.
+// Returns an error if it isn't empty.
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
+	fs.Debugf(f, ">>> Rmdir %q", dir)
+	return f.purgeWithCheck(ctx, dir, true, "rmdir")
+}
+
+// Purge deletes all the files and the root directory
+// Optional interface: Only implement this if you have a way of deleting
+// all the files quicker than just running Remove() on the result of List()
+func (f *Fs) Purge(ctx context.Context) error {
+	fs.Debugf(f, ">>> Purge")
+	return f.purgeWithCheck(ctx, "", false, "purge")
+}
+
+// purgeWithCheck() removes the root directory.
+// Refuses if `check` is set and the directory has anything in it.
+func (f *Fs) purgeWithCheck(ctx context.Context, dir string, check bool, opName string) error {
+	path := f.absPath(dir)
+	if path == "/" || path == "" {
+		// Mail.ru does not allow purging the root space and returns status 400
+		return fs.ErrorNotDeletingDirs
+	}
+
+	_, dirSize, err := f.readItemMetaData(ctx, path)
+	if err != nil {
+		return errors.Wrapf(err, "%s failed", opName)
+	}
+	if check && dirSize > 0 {
+		return fs.ErrorDirectoryNotEmpty
+	}
+	return f.delete(ctx, path, false)
+}
+
+func (f *Fs) delete(ctx context.Context, path string, hardDelete bool) error {
+	token, err := f.accessToken()
+	if err != nil {
+		return err
+	}
+
+	data := url.Values{"home": {path}}
+	opts := rest.Opts{
+		Method: "POST",
+		Path:   "/api/m1/file/remove",
+		Parameters: url.Values{
+			"access_token": {token},
+		},
+		Body:        strings.NewReader(data.Encode()),
+		ContentType: api.BinContentType,
+	}
+
+	var response api.GenericResponse
+	err = f.pacer.Call(func() (bool, error) {
+		res, err := f.srv.CallJSON(&opts, nil, &response)
+		return shouldRetry(res, err, f, &opts)
+	})
+
+	switch {
+	case err != nil:
+		return err
+	case response.Status == 200:
+		return nil
+	default:
+		return fmt.Errorf("delete failed with code %d", response.Status)
+	}
+}
+
+// Copy src to this remote using server side copy operations.
+// This is stored with the remote path given.
+// It returns the destination Object and a possible error.
+// Will only be called if src.Fs().Name() == f.Name() +// If it isn't possible then return fs.ErrorCantCopy +func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { + fs.Debugf(f, ">>> Copy %q %q", src.Remote(), remote) + + srcObj, ok := src.(*Object) + if !ok { + fs.Debugf(src, "Can't copy - not same remote type") + return nil, fs.ErrorCantCopy + } + if srcObj.fs.opt.Username != f.opt.Username { + // Can copy across mailru configs only if they share common account + fs.Debugf(src, "Can't copy - not same account") + return nil, fs.ErrorCantCopy + } + + srcPath := srcObj.absPath() + dstPath := f.absPath(remote) + overwrite := false + fs.Debugf(f, "copy %q -> %q\n", srcPath, dstPath) + + err := f.mkParentDirs(ctx, dstPath) + if err != nil { + return nil, err + } + + data := url.Values{} + data.Set("home", srcPath) + data.Set("folder", parentDir(dstPath)) + data.Set("email", f.opt.Username) + data.Set("x-email", f.opt.Username) + + if overwrite { + data.Set("conflict", "rewrite") + } else { + data.Set("conflict", "rename") + } + + token, err := f.accessToken() + if err != nil { + return nil, err + } + + opts := rest.Opts{ + Method: "POST", + Path: "/api/m1/file/copy", + Parameters: url.Values{ + "access_token": {token}, + }, + Body: strings.NewReader(data.Encode()), + ContentType: api.BinContentType, + } + + var response api.GenericBodyResponse + err = f.pacer.Call(func() (bool, error) { + res, err := f.srv.CallJSON(&opts, nil, &response) + return shouldRetry(res, err, f, &opts) + }) + + if err != nil { + return nil, errors.Wrap(err, "couldn't copy file") + } + if response.Status != 200 { + return nil, fmt.Errorf("copy failed with code %d", response.Status) + } + + tmpPath := response.Body + if tmpPath != dstPath { + fs.Debugf(f, "rename temporary file %q -> %q\n", tmpPath, dstPath) + err = f.moveItemBin(ctx, tmpPath, dstPath, "rename temporary file") + if err != nil { + _ = f.delete(ctx, tmpPath, false) // ignore error + return nil, err + } + } + + // fix modification time at destination + dstObj := &Object{ + fs: f, + remote: remote, + } + err = dstObj.readMetaData(ctx, true) + if err == nil && dstObj.modTime != srcObj.modTime { + dstObj.modTime = srcObj.modTime + err = dstObj.addFileMetaData(ctx, true) + } + if err != nil { + dstObj = nil + } + return dstObj, err +} + +// Move src to this remote using server side move operations. +// This is stored with the remote path given. +// It returns the destination Object and a possible error. 
+// Will only be called if src.Fs().Name() == f.Name() +// If it isn't possible then return fs.ErrorCantMove +func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { + fs.Debugf(f, ">>> Move %q %q", src.Remote(), remote) + + srcObj, ok := src.(*Object) + if !ok { + fs.Debugf(src, "Can't move - not same remote type") + return nil, fs.ErrorCantMove + } + if srcObj.fs.opt.Username != f.opt.Username { + // Can move across mailru configs only if they share common account + fs.Debugf(src, "Can't move - not same account") + return nil, fs.ErrorCantMove + } + + srcPath := srcObj.absPath() + dstPath := f.absPath(remote) + + err := f.mkParentDirs(ctx, dstPath) + if err != nil { + return nil, err + } + + err = f.moveItemBin(ctx, srcPath, dstPath, "move file") + if err != nil { + return nil, err + } + + return f.NewObject(ctx, remote) +} + +// move/rename an object using BIN protocol +func (f *Fs) moveItemBin(ctx context.Context, srcPath, dstPath, opName string) error { + token, err := f.accessToken() + if err != nil { + return err + } + metaURL, err := f.metaServer() + if err != nil { + return err + } + + req := api.NewBinWriter() + req.WritePu16(api.OperationRename) + req.WritePu32(0) // old revision + req.WriteString(srcPath) + req.WritePu32(0) // new revision + req.WriteString(dstPath) + req.WritePu32(0) // dunno + + opts := rest.Opts{ + Method: "POST", + RootURL: metaURL, + Parameters: url.Values{ + "client_id": {api.OAuthClientID}, + "token": {token}, + }, + ContentType: api.BinContentType, + Body: req.Reader(), + } + + var res *http.Response + err = f.pacer.Call(func() (bool, error) { + res, err = f.srv.Call(&opts) + return shouldRetry(res, err, f, &opts) + }) + if err != nil { + closeBody(res) + return err + } + + reply := api.NewBinReader(res.Body) + defer closeBody(res) + + switch status := reply.ReadByteAsInt(); status { + case api.MoveResultOK: + return nil + default: + return fmt.Errorf("%s failed with error %d", opName, status) + } +} + +// DirMove moves src, srcRemote to this remote at dstRemote +// using server side move operations. +// Will only be called if src.Fs().Name() == f.Name() +// If it isn't possible then return fs.ErrorCantDirMove +// If destination exists then return fs.ErrorDirExists +func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { + fs.Debugf(f, ">>> DirMove %q %q", srcRemote, dstRemote) + + srcFs, ok := src.(*Fs) + if !ok { + fs.Debugf(srcFs, "Can't move directory - not same remote type") + return fs.ErrorCantDirMove + } + if srcFs.opt.Username != f.opt.Username { + // Can move across mailru configs only if they share common account + fs.Debugf(src, "Can't move - not same account") + return fs.ErrorCantDirMove + } + srcPath := srcFs.absPath(srcRemote) + dstPath := f.absPath(dstRemote) + fs.Debugf(srcFs, "DirMove [%s]%q --> [%s]%q\n", srcRemote, srcPath, dstRemote, dstPath) + + // Refuse to move to or from the root + if len(srcPath) <= len(srcFs.root) || len(dstPath) <= len(f.root) { + fs.Debugf(src, "DirMove error: Can't move root") + return errors.New("can't move root directory") + } + + err := f.mkParentDirs(ctx, dstPath) + if err != nil { + return err + } + + _, _, err = f.readItemMetaData(ctx, dstPath) + switch err { + case fs.ErrorObjectNotFound: + // OK! 
+ case nil: + return fs.ErrorDirExists + default: + return err + } + + return f.moveItemBin(ctx, srcPath, dstPath, "directory move") +} + +// PublicLink generates a public link to the remote path (usually readable by anyone) +func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) { + fs.Debugf(f, ">>> PublicLink %q", remote) + + token, err := f.accessToken() + if err != nil { + return "", err + } + + data := url.Values{} + data.Set("home", f.absPath(remote)) + data.Set("email", f.opt.Username) + data.Set("x-email", f.opt.Username) + + opts := rest.Opts{ + Method: "POST", + Path: "/api/m1/file/publish", + Parameters: url.Values{ + "access_token": {token}, + }, + Body: strings.NewReader(data.Encode()), + ContentType: api.BinContentType, + } + + var response api.GenericBodyResponse + err = f.pacer.Call(func() (bool, error) { + res, err := f.srv.CallJSON(&opts, nil, &response) + return shouldRetry(res, err, f, &opts) + }) + + if err == nil && response.Body != "" { + return api.PublicLinkURL + response.Body, nil + } + if err == nil { + return "", errors.New("server returned empty link") + } + if apiErr, ok := err.(*api.FileErrorResponse); ok && apiErr.Status == 404 { + return "", fs.ErrorObjectNotFound + } + return "", err +} + +// CleanUp permanently deletes all trashed files/folders +func (f *Fs) CleanUp(ctx context.Context) error { + fs.Debugf(f, ">>> CleanUp") + + token, err := f.accessToken() + if err != nil { + return err + } + + data := url.Values{ + "email": {f.opt.Username}, + "x-email": {f.opt.Username}, + } + opts := rest.Opts{ + Method: "POST", + Path: "/api/m1/trashbin/empty", + Parameters: url.Values{ + "access_token": {token}, + }, + Body: strings.NewReader(data.Encode()), + ContentType: api.BinContentType, + } + + var response api.CleanupResponse + err = f.pacer.Call(func() (bool, error) { + res, err := f.srv.CallJSON(&opts, nil, &response) + return shouldRetry(res, err, f, &opts) + }) + if err != nil { + return err + } + + switch response.StatusStr { + case "200": + return nil + default: + return fmt.Errorf("cleanup failed (%s)", response.StatusStr) + } +} + +// About gets quota information +func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { + fs.Debugf(f, ">>> About") + + token, err := f.accessToken() + if err != nil { + return nil, err + } + opts := rest.Opts{ + Method: "GET", + Path: "/api/m1/user", + Parameters: url.Values{ + "access_token": {token}, + }, + } + + var info api.UserInfoResponse + err = f.pacer.Call(func() (bool, error) { + res, err := f.srv.CallJSON(&opts, nil, &info) + return shouldRetry(res, err, f, &opts) + }) + if err != nil { + return nil, err + } + + total := info.Body.Cloud.Space.BytesTotal + used := int64(info.Body.Cloud.Space.BytesUsed) + + usage := &fs.Usage{ + Total: fs.NewUsageValue(total), + Used: fs.NewUsageValue(used), + Free: fs.NewUsageValue(total - used), + } + return usage, nil +} + +// Put the object +// Copy the reader in to the new object which is returned +// The new object may have been created if an error is returned +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + o := &Object{ + fs: f, + remote: src.Remote(), + size: src.Size(), + modTime: src.ModTime(ctx), + } + fs.Debugf(f, ">>> Put: %q %d '%v'", o.remote, o.size, o.modTime) + return o, o.Update(ctx, in, src, options...) 
+}
+
+// Update an existing object
+// Copy the reader into the object updating modTime and size
+// The new object may have been created if an error is returned
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+	wrapIn := in
+	size := src.Size()
+	if size < 0 {
+		return errors.New("mailru does not support streaming uploads")
+	}
+
+	err := o.fs.mkParentDirs(ctx, o.absPath())
+	if err != nil {
+		return err
+	}
+
+	var (
+		fileBuf  []byte
+		fileHash []byte
+		newHash  []byte
+	)
+
+	// Request the hash from the source
+	if srcHash, err := src.Hash(ctx, hash.Mailru); err == nil && srcHash != "" {
+		fileHash, _ = mrhash.DecodeString(srcHash)
+	}
+
+	// Try the speedup method if it's globally enabled and the source hash is available
+	trySpeedup := o.fs.opt.SpeedupEnable
+	if trySpeedup && fileHash != nil {
+		if o.putByHash(ctx, fileHash, src, "source") {
+			return nil
+		}
+		trySpeedup = false // speedup failed, force upload
+	}
+
+	// The hash needs calculating; check whether the file is still eligible for speedup
+	if trySpeedup {
+		trySpeedup = o.fs.eligibleForSpeedup(o.Remote(), size, options...)
+	}
+
+	// Attempt to put by hash, calculating the hash in memory
+	if trySpeedup && size <= int64(o.fs.opt.SpeedupMaxMem) {
+		//fs.Debugf(o, "attempt to put by hash from memory")
+		fileBuf, err = ioutil.ReadAll(wrapIn)
+		if err != nil {
+			return err
+		}
+		fileHash = mrhash.Sum(fileBuf)
+		if o.putByHash(ctx, fileHash, src, "memory") {
+			return nil
+		}
+		wrapIn = bytes.NewReader(fileBuf)
+		trySpeedup = false // speedup failed, force upload
+	}
+
+	// Attempt to put by hash using a spool file
+	if trySpeedup {
+		tmpFs, err := fs.TemporaryLocalFs()
+		if err != nil {
+			fs.Infof(tmpFs, "Failed to create spool FS: %v", err)
+		} else {
+			defer func() {
+				if err := operations.Purge(ctx, tmpFs, ""); err != nil {
+					fs.Infof(tmpFs, "Failed to cleanup spool FS: %v", err)
+				}
+			}()
+
+			spoolFile, mrHash, err := makeTempFile(ctx, tmpFs, wrapIn, src)
+			if err != nil {
+				return errors.Wrap(err, "Failed to create spool file")
+			}
+			if o.putByHash(ctx, mrHash, src, "spool") {
+				// If put by hash is successful, ignore any transient error
+				return nil
+			}
+			if wrapIn, err = spoolFile.Open(ctx); err != nil {
+				return err
+			}
+			fileHash = mrHash
+		}
+	}
+
+	// Upload the object data
+	if size <= mrhash.Size {
+		// Optimize the upload: for data that fits in the hash buffer
+		// the hash *is* the (zero-padded) data, so the upload request
+		// can be skipped and the file registered by metadata alone.
+		if fileBuf == nil {
+			fileBuf, err = ioutil.ReadAll(wrapIn)
+		}
+		if fileHash == nil && err == nil {
+			fileHash = mrhash.Sum(fileBuf)
+		}
+		newHash = fileHash
+	} else {
+		var hasher gohash.Hash
+		if fileHash == nil {
+			// Calculate the hash in transit
+			hasher = mrhash.New()
+			wrapIn = io.TeeReader(wrapIn, hasher)
+		}
+		newHash, err = o.upload(wrapIn, size, options...)
+		if fileHash == nil && err == nil {
+			fileHash = hasher.Sum(nil)
+		}
+	}
+	if err != nil {
+		return err
+	}
+
+	if !bytes.Equal(fileHash, newHash) {
+		if o.fs.opt.CheckHash {
+			return mrhash.ErrorInvalidHash
+		}
+		fs.Infof(o, "hash mismatch on upload: expected %x received %x", fileHash, newHash)
+	}
+	o.mrHash = newHash
+	o.size = size
+	o.modTime = src.ModTime(ctx)
+	return o.addFileMetaData(ctx, true)
+}
+
+// eligibleForSpeedup checks whether a file is eligible for the speedup method (put by hash)
+func (f *Fs) eligibleForSpeedup(remote string, size int64, options ...fs.OpenOption) bool {
+	if !f.opt.SpeedupEnable {
+		return false
+	}
+	if size <= mrhash.Size || size < speedupMinSize || size >= int64(f.opt.SpeedupMaxDisk) {
+		return false
+	}
+	_, _, partial := getTransferRange(size, options...)
+	if partial {
+		return false
+	}
+	if f.speedupAny {
+		return true
+	}
+	if f.speedupGlobs == nil {
+		return false
+	}
+	nameLower := strings.ToLower(strings.TrimSpace(path.Base(remote)))
+	for _, pattern := range f.speedupGlobs {
+		if matches, _ := filepath.Match(pattern, nameLower); matches {
+			return true
+		}
+	}
+	return false
+}
+
+// parseSpeedupPatterns converts a pattern string into a list of unique glob patterns
+func (f *Fs) parseSpeedupPatterns(patternString string) (err error) {
+	f.speedupGlobs = nil
+	f.speedupAny = false
+	uniqueValidPatterns := make(map[string]interface{})
+
+	for _, pattern := range strings.Split(patternString, ",") {
+		pattern = strings.ToLower(strings.TrimSpace(pattern))
+		if pattern == "" {
+			continue
+		}
+		if pattern == "*" {
+			f.speedupAny = true
+		}
+		if _, err := filepath.Match(pattern, ""); err != nil {
+			return fmt.Errorf("invalid file name pattern %q", pattern)
+		}
+		uniqueValidPatterns[pattern] = nil
+	}
+	for pattern := range uniqueValidPatterns {
+		f.speedupGlobs = append(f.speedupGlobs, pattern)
+	}
+	return nil
+}
+
+// putByHash registers the file on the server by hash alone, skipping the upload.
+// It returns true on success and false if the data must be uploaded in full.
+func (o *Object) putByHash(ctx context.Context, mrHash []byte, info fs.ObjectInfo, method string) bool {
+	oNew := new(Object)
+	*oNew = *o
+	oNew.mrHash = mrHash
+	oNew.size = info.Size()
+	oNew.modTime = info.ModTime(ctx)
+	if err := oNew.addFileMetaData(ctx, true); err != nil {
+		fs.Debugf(o, "Cannot put by hash from %s, performing upload", method)
+		return false
+	}
+	*o = *oNew
+	fs.Debugf(o, "File has been put by hash from %s", method)
+	return true
+}
+
+// makeTempFile spools the input stream to a temporary local file
+// while calculating the Mailru hash in transit
+func makeTempFile(ctx context.Context, tmpFs fs.Fs, wrapIn io.Reader, src fs.ObjectInfo) (spoolFile fs.Object, mrHash []byte, err error) {
+	// The local temporary file system must support SHA1
+	hashType := hash.SHA1
+
+	// Calculate the Mailru and spool verification hashes in transit
+	hashSet := hash.NewHashSet(hash.Mailru, hashType)
+	hasher, err := hash.NewMultiHasherTypes(hashSet)
+	if err != nil {
+		return nil, nil, err
+	}
+	wrapIn = io.TeeReader(wrapIn, hasher)
+
+	// Copy the stream into the spool file
+	tmpInfo := object.NewStaticObjectInfo(src.Remote(), src.ModTime(ctx), src.Size(), false, nil, nil)
+	hashOption := &fs.HashesOption{Hashes: hashSet}
+	if spoolFile, err = tmpFs.Put(ctx, wrapIn, tmpInfo, hashOption); err != nil {
+		return nil, nil, err
+	}
+
+	// Validate the spool file
+	sums := hasher.Sums()
+	checkSum := sums[hashType]
+	fileSum, err := spoolFile.Hash(ctx, hashType)
+	if spoolFile.Size() != src.Size() || err != nil || checkSum == "" || fileSum != checkSum {
+		return nil, nil, mrhash.ErrorInvalidHash
+	}
+
+	mrHash, err = mrhash.DecodeString(sums[hash.Mailru])
+	return
+}
+
+// upload uploads the object content to an upload shard
+func (o *Object) upload(in io.Reader, size int64, options ...fs.OpenOption) ([]byte, error) {
+	token, err := o.fs.accessToken()
+	if err != nil {
+		return nil, err
+	}
+	shardURL, err := o.fs.uploadShard()
+	if err != nil {
+		return nil, err
+	}
+
+	opts := rest.Opts{
+		Method:        "PUT",
+		RootURL:       shardURL,
+		Body:          in,
+		Options:       options,
+		ContentLength: &size,
+		Parameters: url.Values{
+			"client_id": {api.OAuthClientID},
+			"token":     {token},
+		},
+		ExtraHeaders: map[string]string{
+			"Accept": "*/*",
+		},
+	}
+
+	var (
+		res     *http.Response
+		strHash string
+	)
+	err = o.fs.pacer.Call(func() (bool, error) {
+		res, err = o.fs.srv.Call(&opts)
+		if err == nil {
+			strHash, err = readBodyWord(res)
+		}
+		return fserrors.ShouldRetry(err), err
+	})
+	if err != nil {
+		closeBody(res)
+		return nil, err
+	}
+
+	switch res.StatusCode {
+	case 200, 201:
+		return mrhash.DecodeString(strHash)
+	default:
+		return nil, fmt.Errorf("upload failed with status %q (%d)", res.Status, res.StatusCode)
+	}
+}
+
+// uploadShard returns the upload server URL, refreshing the cached
+// value from the dispatcher when it expires
+func (f *Fs) uploadShard() (string, error) {
+	f.shardMu.Lock()
+	defer f.shardMu.Unlock()
+
+	if f.shardURL != "" && time.Now().Before(f.shardExpiry) {
+		return f.shardURL, nil
+	}
+
+	token, err := f.accessToken()
+	if err != nil {
+		return "", err
+	}
+
+	opts := rest.Opts{
+		Method: "GET",
+		Path:   "/api/m1/dispatcher",
+		Parameters: url.Values{
+			"client_id":    {api.OAuthClientID},
+			"access_token": {token},
+		},
+	}
+
+	var info api.ShardInfoResponse
+	err = f.pacer.Call(func() (bool, error) {
+		res, err := f.srv.CallJSON(&opts, nil, &info)
+		return shouldRetry(res, err, f, &opts)
+	})
+	if err != nil {
+		return "", err
+	}
+	if len(info.Body.Upload) == 0 {
+		// Guard against an empty dispatcher reply to avoid a panic below
+		return "", errors.New("Dispatcher returned no upload shards")
+	}
+
+	f.shardURL = info.Body.Upload[0].URL
+	f.shardExpiry = time.Now().Add(shardExpirySec * time.Second)
+	fs.Debugf(f, "new upload shard: %s", f.shardURL)
+
+	return f.shardURL, nil
+}
+
+// Object describes a mailru object
+type Object struct {
+	fs          *Fs       // what this object is part of
+	remote      string    // The remote path
+	hasMetaData bool      // whether info below has been set
+	size        int64     // Bytes in the object
+	modTime     time.Time // Modified time of the object
+	mrHash      []byte    // Mail.ru flavored SHA1 hash of the object
+}
+
+// NewObject finds an Object at the remote.
+// If the object can't be found it fails with fs.ErrorObjectNotFound
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
+	fs.Debugf(f, ">>> NewObject %q", remote)
+	o := &Object{
+		fs:     f,
+		remote: remote,
+	}
+	err := o.readMetaData(ctx, true)
+	if err != nil {
+		return nil, err
+	}
+	return o, nil
+}
+
+// absPath converts a root-relative remote to an absolute home path
+func (o *Object) absPath() string {
+	return o.fs.absPath(o.remote)
+}
+
+// readMetaData reads the object metadata and fills in the file info.
+// If the object can't be found it fails with fs.ErrorObjectNotFound
+func (o *Object) readMetaData(ctx context.Context, force bool) error {
+	if o.hasMetaData && !force {
+		return nil
+	}
+	entry, dirSize, err := o.fs.readItemMetaData(ctx, o.absPath())
+	if err != nil {
+		return err
+	}
+	newObj, ok := entry.(*Object)
+	if !ok || dirSize >= 0 {
+		return fs.ErrorNotAFile
+	}
+	if newObj.remote != o.remote {
+		return fmt.Errorf("file %q path has changed to %q", o.remote, newObj.remote)
+	}
+	o.hasMetaData = true
+	o.size = newObj.size
+	o.modTime = newObj.modTime
+	o.mrHash = newObj.mrHash
+	return nil
+}
+
+// Fs returns the parent Fs
+func (o *Object) Fs() fs.Info {
+	return o.fs
+}
+
+// String returns a string version of the object
+func (o *Object) String() string {
+	if o == nil {
+		return "<nil>"
+	}
+	//return fmt.Sprintf("[%s]%q", o.fs.root, o.remote)
+	return o.remote
+}
+
+// Remote returns the remote path
+func (o *Object) Remote() string {
+	return o.remote
+}
+
+// ModTime returns the modification time of the object.
+// It reads the time from the object metadata, fetching
+// the metadata from the remote if it isn't cached yet
+func (o *Object) ModTime(ctx context.Context) time.Time {
+	err := o.readMetaData(ctx, false)
+	if err != nil {
+		fs.Errorf(o, "%v", err)
+	}
+	return o.modTime
+}
+
+// Size returns the size of an object in bytes
+func (o *Object) Size() int64 {
+	ctx := context.Background() // Note: Object.Size does not pass context!
+	err := o.readMetaData(ctx, false)
+	if err != nil {
+		fs.Errorf(o, "%v", err)
+	}
+	return o.size
+}
+
+// Hash returns the Mailru hash of an object
+// as a lowercase hex string
+func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
+	if t == hash.Mailru {
+		return hex.EncodeToString(o.mrHash), nil
+	}
+	return "", hash.ErrUnsupported
+}
+
+// Storable returns whether this object is storable
+func (o *Object) Storable() bool {
+	return true
+}
+
+// SetModTime sets the modification time of the local fs object
+//
+// Commits the datastore
+func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
+	fs.Debugf(o, ">>> SetModTime [%v]", modTime)
+	o.modTime = modTime
+	return o.addFileMetaData(ctx, true)
+}
+
+// addFileMetaData registers the file hash, size and modification time
+// on the metadata server
+func (o *Object) addFileMetaData(ctx context.Context, overwrite bool) error {
+	if len(o.mrHash) != mrhash.Size {
+		return mrhash.ErrorInvalidHash
+	}
+	token, err := o.fs.accessToken()
+	if err != nil {
+		return err
+	}
+	metaURL, err := o.fs.metaServer()
+	if err != nil {
+		return err
+	}
+
+	req := api.NewBinWriter()
+	req.WritePu16(api.OperationAddFile)
+	req.WritePu16(0) // revision
+	req.WriteString(o.absPath())
+	req.WritePu64(o.size)
+	req.WritePu64(o.modTime.Unix())
+	req.WritePu32(0)
+	req.Write(o.mrHash)
+
+	if overwrite {
+		// overwrite
+		req.WritePu32(1)
+	} else {
+		// don't add if not changed, add with rename if changed
+		req.WritePu32(55)
+		req.Write(o.mrHash)
+		req.WritePu64(o.size)
+	}
+
+	opts := rest.Opts{
+		Method:  "POST",
+		RootURL: metaURL,
+		Parameters: url.Values{
+			"client_id": {api.OAuthClientID},
+			"token":     {token},
+		},
+		ContentType: api.BinContentType,
+		Body:        req.Reader(),
+	}
+
+	var res *http.Response
+	err = o.fs.pacer.Call(func() (bool, error) {
+		res, err = o.fs.srv.Call(&opts)
+		return shouldRetry(res, err, o.fs, &opts)
+	})
+	if err != nil {
+		closeBody(res)
+		return err
+	}
+
+	reply := api.NewBinReader(res.Body)
+	defer closeBody(res)
+
+	switch status := reply.ReadByteAsInt(); status {
+	case api.AddResultOK, api.AddResultNotModified, api.AddResultDunno04, api.AddResultDunno09:
+		return nil
+	case api.AddResultInvalidName:
+		return ErrorInvalidName
+	default:
+		return fmt.Errorf("add file error %d", status)
+	}
+}
+
+// Remove an object
+func (o *Object) Remove(ctx context.Context) error {
+	fs.Debugf(o, ">>> Remove")
+	return o.fs.delete(ctx, o.absPath(), false)
+}
+
+// getTransferRange detects partial transfers and calculates start/end offsets into the file.
+// For example, fs.RangeOption{Start: 100, End: 199} on a 500 byte file
+// yields start=100, end=200, partial=true.
+func getTransferRange(size int64, options ...fs.OpenOption) (start int64, end int64, partial bool) {
+	var offset, limit int64 = 0, -1
+
+	for _, option := range options {
+		switch opt := option.(type) {
+		case *fs.SeekOption:
+			offset = opt.Offset
+		case *fs.RangeOption:
+			offset, limit = opt.Decode(size)
+		default:
+			if option.Mandatory() {
+				fs.Errorf(nil, "Unsupported mandatory option: %v", option)
+			}
+		}
+	}
+	if limit < 0 {
+		limit = size - offset
+	}
+	end = offset + limit
+	if end > size {
+		end = size
+	}
+	partial = !(offset == 0 && end == size)
+	return offset, end, partial
+}
+
+// Open an object for reading and download its content
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
+	fs.Debugf(o, ">>> Open")
+
+	token, err := o.fs.accessToken()
+	if err != nil {
+		return nil, err
+	}
+
+	start, end, partial := getTransferRange(o.size, options...)
+
+	// TODO: set custom timeouts
+	opts := rest.Opts{
+		Method:  "GET",
+		Options: options,
+		Path:    url.PathEscape(strings.TrimLeft(o.absPath(), "/")),
+		Parameters: url.Values{
+			"client_id": {api.OAuthClientID},
+			"token":     {token},
+		},
+		ExtraHeaders: map[string]string{
+			"Accept": "*/*",
+			"Range":  fmt.Sprintf("bytes=%d-%d", start, end-1),
+		},
+	}
+
+	var res *http.Response
+	server := ""
+	err = o.fs.pacer.Call(func() (bool, error) {
+		server, err = o.fs.fileServers.Dispatch(server)
+		if err != nil {
+			return false, err
+		}
+		opts.RootURL = server
+		res, err = o.fs.srv.Call(&opts)
+		return shouldRetry(res, err, o.fs, &opts)
+	})
+	if err != nil {
+		if res != nil && res.Body != nil {
+			closeBody(res)
+		}
+		return nil, err
+	}
+
+	var hasher gohash.Hash
+	if !partial {
+		// Cannot check the hash of a partial download
+		hasher = mrhash.New()
+	}
+	wrapStream := &endHandler{
+		ctx:    ctx,
+		stream: res.Body,
+		hasher: hasher,
+		o:      o,
+		server: server,
+	}
+	return wrapStream, nil
+}
+
+// endHandler wraps a download stream: it optionally hashes the data
+// in transit, verifies the hash at EOF and releases the file server
+// when the transfer ends
+type endHandler struct {
+	ctx    context.Context
+	stream io.ReadCloser
+	hasher gohash.Hash
+	o      *Object
+	server string
+	done   bool
+}
+
+func (e *endHandler) Read(p []byte) (n int, err error) {
+	n, err = e.stream.Read(p)
+	if e.hasher != nil {
+		// hasher never returns an error, it can only panic
+		_, _ = e.hasher.Write(p[:n])
+	}
+	if err != nil { // a real error or EOF
+		err = e.handle(err)
+	}
+	return
+}
+
+func (e *endHandler) Close() error {
+	_ = e.handle(nil) // ignore returned error
+	return e.stream.Close()
+}
+
+func (e *endHandler) handle(err error) error {
+	if e.done {
+		return err
+	}
+	e.done = true
+	o := e.o
+
+	o.fs.fileServers.Free(e.server)
+	if err != io.EOF || e.hasher == nil {
+		return err
+	}
+
+	newHash := e.hasher.Sum(nil)
+	if bytes.Equal(o.mrHash, newHash) {
+		return io.EOF
+	}
+	if o.fs.opt.CheckHash {
+		return mrhash.ErrorInvalidHash
+	}
+	fs.Infof(o, "hash mismatch on download: expected %x received %x", o.mrHash, newHash)
+	return io.EOF
+}
+
+// serverPool backs the server dispatcher
+type serverPool struct {
+	pool      pendingServerMap
+	mu        sync.Mutex
+	path      string
+	expirySec time.Duration
+	fs        *Fs
+}
+
+type pendingServerMap map[string]*pendingServer
+
+type pendingServer struct {
+	locks  int
+	expiry time.Time
+}
+
+// Dispatch dispatches the next download server.
+// It prefers switching and tries to avoid the server currently in use
+// by the caller, because that server may be overloaded or slow.
+func (p *serverPool) Dispatch(current string) (string, error) {
+	now := time.Now()
+	url := p.getServer(current, now)
+	if url != "" {
+		return url, nil
+	}
+
+	// Server not found - ask the Mailru dispatcher.
+	opts := rest.Opts{
+		Method:  "GET",
+		RootURL: api.DispatchServerURL,
+		Path:    p.path,
+	}
+	var (
+		res *http.Response
+		err error
+	)
+	err = p.fs.pacer.Call(func() (bool, error) {
+		res, err = p.fs.srv.Call(&opts)
+		if err != nil {
+			return fserrors.ShouldRetry(err), err
+		}
+		url, err = readBodyWord(res)
+		return fserrors.ShouldRetry(err), err
+	})
+	if err == nil && url == "" {
+		// errors.Wrap(nil, ...) would return nil, so report an empty reply explicitly
+		err = errors.New("Empty reply from dispatcher")
+	}
+	if err != nil {
+		closeBody(res)
+		return "", errors.Wrap(err, "Failed to request file server")
+	}
+
+	p.addServer(url, now)
+	return url, nil
+}
+
+// Free releases a lock on the given file server
+func (p *serverPool) Free(url string) {
+	if url == "" {
+		return
+	}
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	srv := p.pool[url]
+	if srv == nil {
+		return
+	}
+
+	if srv.locks <= 0 {
+		// Getting here indicates a possible race
+		fs.Infof(p.fs, "Purge file server: locks -, url %s", url)
+		delete(p.pool, url)
+		return
+	}
+
+	srv.locks--
+	if srv.locks == 0 && time.Now().After(srv.expiry) {
+		delete(p.pool, url)
+		fs.Debugf(p.fs, "Free file server: locks 0, url %s", url)
+		return
+	}
+	fs.Debugf(p.fs, "Unlock file server: locks %d, url %s", srv.locks, url)
+}
+
+// getServer finds an underlocked server
+func (p *serverPool) getServer(current string, now time.Time) string {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	for url, srv := range p.pool {
+		if url == "" || srv.locks < 0 {
+			continue // Purged server slot
+		}
+		if url == current {
+			continue // Current server - prefer another
+		}
+		if srv.locks >= maxServerLocks {
+			continue // Overlocked server
+		}
+		if now.After(srv.expiry) {
+			continue // Expired server
+		}
+
+		srv.locks++
+		fs.Debugf(p.fs, "Lock file server: locks %d, url %s", srv.locks, url)
+		return url
+	}
+
+	return ""
+}
+
+// addServer registers a server proposed by the dispatcher
+func (p *serverPool) addServer(url string, now time.Time) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	expiry := now.Add(p.expirySec * time.Second)
+
+	expiryStr := []byte("-")
+	if fs.Config.LogLevel >= fs.LogLevelInfo {
+		expiryStr, _ = expiry.MarshalJSON()
+	}
+
+	// Attach to a server proposed by the dispatcher
+	srv := p.pool[url]
+	if srv != nil {
+		srv.locks++
+		srv.expiry = expiry
+		fs.Debugf(p.fs, "Reuse file server: locks %d, url %s, expiry %s", srv.locks, url, expiryStr)
+		return
+	}
+
+	// Add a new server
+	p.pool[url] = &pendingServer{locks: 1, expiry: expiry}
+	fs.Debugf(p.fs, "Switch file server: locks 1, url %s, expiry %s", url, expiryStr)
+}
+
+// Name of the remote (as passed into NewFs)
+func (f *Fs) Name() string {
+	return f.name
+}
+
+// Root of the remote (as passed into NewFs)
+func (f *Fs) Root() string {
+	return f.root
+}
+
+// String converts this Fs to a string
+func (f *Fs) String() string {
+	return fmt.Sprintf("[%s]", f.root)
+}
+
+// Precision returns the precision of this Fs
+func (f *Fs) Precision() time.Duration {
+	return time.Second
+}
+
+// Hashes returns the supported hash sets
+func (f *Fs) Hashes() hash.Set {
+	return hash.Set(hash.Mailru)
+}
+
+// Features returns the optional features of this Fs
+func (f *Fs) Features() *fs.Features {
+	return f.features
+}
+
+// closeBody closes the response body, ignoring errors
+func closeBody(res *http.Response) {
+	if res != nil {
+		_ = res.Body.Close()
+	}
+}
+
+// Check the interfaces are satisfied
+var (
+	_ fs.Fs           = (*Fs)(nil)
+	_ fs.Purger       = (*Fs)(nil)
+	_ fs.Copier       = (*Fs)(nil)
+	_ fs.Mover        = (*Fs)(nil)
+	_ fs.DirMover     = (*Fs)(nil)
+	_ fs.PublicLinker = (*Fs)(nil)
+	_ fs.CleanUpper   = (*Fs)(nil)
+	_ fs.Abouter      = (*Fs)(nil)
+	_ fs.Object       = (*Object)(nil)
+)
diff --git a/backend/mailru/mailru_test.go b/backend/mailru/mailru_test.go
new file mode 100644
index 000000000..dd05f8abf
--- /dev/null
+++ b/backend/mailru/mailru_test.go
@@ -0,0 +1,18 @@
+// Test Mailru filesystem interface
+package mailru_test
+
+import (
+	"testing"
+
+	"github.com/rclone/rclone/backend/mailru"
+	"github.com/rclone/rclone/fstest/fstests"
+)
+
+// TestIntegration runs integration tests against the remote
+func TestIntegration(t *testing.T) {
+	fstests.Run(t, &fstests.Opt{
+		RemoteName:               "TestMailru:",
+		NilObject:                (*mailru.Object)(nil),
+		SkipBadWindowsCharacters: true,
+	})
+}
diff --git a/bin/make_manual.py b/bin/make_manual.py
index afb29feb4..14bd63393 100755
--- a/bin/make_manual.py
+++ b/bin/make_manual.py
@@ -41,6 +41,7 @@ docs = [
     "hubic.md",
     "jottacloud.md",
     "koofr.md",
+    "mailru.md",
     "mega.md",
     "azureblob.md",
     "onedrive.md",
diff --git a/docs/content/about.md b/docs/content/about.md
index 57d9cc4e2..93a799734 100644
--- a/docs/content/about.md
+++ b/docs/content/about.md
@@ -30,6 +30,7 @@ Rclone is a command line program to sync files and directories to and from:
 * {{< provider name="Jottacloud" home="https://www.jottacloud.com/en/" config="/jottacloud/" >}}
 * {{< provider name="IBM COS S3" home="http://www.ibm.com/cloud/object-storage" config="/s3/#ibm-cos-s3" >}}
 * {{< provider name="Koofr" home="https://koofr.eu/" config="/koofr/" >}}
+* {{< provider name="Mail.ru Cloud" home="https://cloud.mail.ru/" config="/mailru/" >}}
 * {{< provider name="Memset Memstore" home="https://www.memset.com/cloud/storage/" config="/swift/" >}}
 * {{< provider name="Mega" home="https://mega.nz/" config="/mega/" >}}
 * {{< provider name="Microsoft Azure Blob Storage" home="https://azure.microsoft.com/en-us/services/storage/blobs/" config="/azureblob/" >}}
diff --git a/docs/content/docs.md b/docs/content/docs.md
index 29620dce0..953b1d03b 100644
--- a/docs/content/docs.md
+++ b/docs/content/docs.md
@@ -37,6 +37,7 @@ See the following for detailed instructions for
   * [Hubic](/hubic/)
   * [Jottacloud](/jottacloud/)
   * [Koofr](/koofr/)
+  * [Mail.ru Cloud](/mailru/)
   * [Mega](/mega/)
   * [Microsoft Azure Blob Storage](/azureblob/)
   * [Microsoft OneDrive](/onedrive/)
diff --git a/docs/content/mailru.md b/docs/content/mailru.md
new file mode 100644
index 000000000..63e612a70
--- /dev/null
+++ b/docs/content/mailru.md
@@ -0,0 +1,280 @@
+---
+title: "Mailru"
+description: "Mail.ru Cloud"
+date: "2019-08-04"
+---
+
+ Mail.ru Cloud
+----------------------------------------
+
+[Mail.ru Cloud](https://cloud.mail.ru/) is a cloud storage service provided by the Russian internet company [Mail.Ru Group](https://mail.ru). The official desktop client is [Disk-O:](https://disk-o.cloud/), available only on Windows. (Please note that the official sites are in Russian.)
+
+### Feature highlights ###
+
+- Paths may be as deep as required, eg `remote:directory/subdirectory`
+- Files have a `last modified time` property, directories don't
+- Deleted files are by default moved to the trash
+- Files and directories can be shared via public links
+- Partial uploads and streaming are not supported: the file size must be known before upload
+- Maximum file size is limited to 2G for a free account, unlimited for paid accounts
+- Storage keeps a hash for all files and performs transparent deduplication;
+  the hash algorithm is a modified SHA1
+- If a particular file is already present in storage, one can quickly submit its hash
+  instead of uploading the whole file (rclone supports this optimization)
+
+### Configuration ###
+
+Here is an example of making a mailru configuration.
+First create a Mail.ru Cloud account and choose a tariff, then run
+
+    rclone config
+
+This will guide you through an interactive setup process:
+
+```
+No remotes found - make a new one
+n) New remote
+s) Set configuration password
+q) Quit config
+n/s/q> n
+name> remote
+Type of storage to configure.
+Enter a string value. Press Enter for the default ("").
+Choose a number from below, or type in your own value
+[snip]
+XX / Mail.ru Cloud
+   \ "mailru"
+[snip]
+Storage> mailru
+User name (usually email)
+Enter a string value. Press Enter for the default ("").
+user> username@mail.ru
+Password
+y) Yes type in my own password
+g) Generate random password
+y/g> y
+Enter the password:
+password:
+Confirm the password:
+password:
+Skip full upload if there is another file with same data hash.
+This feature is called "speedup" or "put by hash". It is especially efficient
+in case of generally available files like popular books, video or audio clips
+[snip]
+Enter a boolean value (true or false). Press Enter for the default ("true").
+Choose a number from below, or type in your own value
+ 1 / Enable
+   \ "true"
+ 2 / Disable
+   \ "false"
+speedup_enable> 1
+Edit advanced config? (y/n)
+y) Yes
+n) No
+y/n> n
+Remote config
+--------------------
+[remote]
+type = mailru
+user = username@mail.ru
+pass = *** ENCRYPTED ***
+speedup_enable = true
+--------------------
+y) Yes this is OK
+e) Edit this remote
+d) Delete this remote
+y/e/d> y
+```
+
+Configuration of this backend does not require a local web browser.
+You can use the configured backend as shown below:
+
+See top level directories
+
+    rclone lsd remote:
+
+Make a new directory
+
+    rclone mkdir remote:directory
+
+List the contents of a directory
+
+    rclone ls remote:directory
+
+Sync `/home/local/directory` to the remote path, deleting any
+excess files in the path.
+
+    rclone sync /home/local/directory remote:directory
+
+### Modified time ###
+
+Files support a modification time attribute with up to 1 second precision.
+Directories do not have a modification time, which is shown as "Jan 1 1970".
+
+### Hash checksums ###
+
+Hash sums use a custom Mail.ru algorithm based on SHA1.
+If the file size is less than or equal to the SHA1 hash size (20 bytes),
+its hash is simply its data right-padded with zero bytes.
+The hash sum of a larger file is computed as the SHA1 sum of the file data
+bytes concatenated with a decimal representation of the data length.
+The following Go sketch illustrates this scheme. It is written from the
+description above for illustration only and is not the backend's actual
+`mrhash` implementation:
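+
+```go
+package main
+
+import (
+	"crypto/sha1"
+	"fmt"
+	"strconv"
+)
+
+// mailruHash follows the scheme described above: data of 20 bytes or
+// less is its own hash, right-padded with zeros; anything larger is
+// hashed as SHA1(data || decimal length of data).
+func mailruHash(data []byte) []byte {
+	if len(data) <= sha1.Size { // sha1.Size == 20
+		padded := make([]byte, sha1.Size)
+		copy(padded, data)
+		return padded
+	}
+	h := sha1.New()
+	_, _ = h.Write(data)
+	_, _ = h.Write([]byte(strconv.Itoa(len(data))))
+	return h.Sum(nil)
+}
+
+func main() {
+	fmt.Printf("%x\n", mailruHash([]byte("hello, mailru")))
+}
+```
+
+### Emptying Trash ###
+
+Removing a file or directory actually moves it to the trash, which is not
+visible to rclone but can be seen in a web browser. The trashed file
+still occupies part of the total quota. If you wish to empty your trash
+and free some quota, you can use the `rclone cleanup remote:` command,
+which will permanently delete all your trashed files.
+This command does not take any path arguments.
+
+### Quota information ###
+
+To view your current quota you can use the `rclone about remote:`
+command, which will display your usage limit (quota) and the current usage.
+
+### Limitations ###
+
+File size limits depend on your account. A single file size is limited to 2G
+for a free account and unlimited for paid tariffs. Please refer to the Mail.ru
+site for the total uploaded size limits.
+
+Note that Mailru is case insensitive so you can't have a file called
+"Hello.doc" and one called "hello.doc".
+
+
+### Standard Options
+
+Here are the standard options specific to mailru (Mail.ru Cloud).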
+
+#### --mailru-user
+
+User name (usually email)
+
+- Config: user
+- Env Var: RCLONE_MAILRU_USER
+- Type: string
+- Default: ""
+
+#### --mailru-pass
+
+Password
+
+- Config: pass
+- Env Var: RCLONE_MAILRU_PASS
+- Type: string
+- Default: ""
+
+#### --mailru-speedup-enable
+
+Skip the full upload if there is another file with the same data hash.
+This feature is called "speedup" or "put by hash". It is especially efficient
+in case of generally available files like popular books, video or audio clips,
+because files are searched by hash in all accounts of all mailru users.
+Please note that rclone may need local memory and disk space to calculate
+the content hash in advance and decide whether a full upload is required.
+Also, if rclone does not know the file size in advance (e.g. in case of
+streaming or partial uploads), it will not even try this optimization.
+
+- Config: speedup_enable
+- Env Var: RCLONE_MAILRU_SPEEDUP_ENABLE
+- Type: bool
+- Default: true
+- Examples:
+    - "true"
+        - Enable
+    - "false"
+        - Disable
+
+### Advanced Options
+
+Here are the advanced options specific to mailru (Mail.ru Cloud).
+
+#### --mailru-speedup-file-patterns
+
+Comma separated list of file name patterns eligible for speedup (put by hash).
+Patterns are case insensitive and can contain `*` or `?` meta characters.
+
+- Config: speedup_file_patterns
+- Env Var: RCLONE_MAILRU_SPEEDUP_FILE_PATTERNS
+- Type: string
+- Default: "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf"
+- Examples:
+    - `""`
+        - Empty list completely disables speedup (put by hash).
+    - `"*"`
+        - All files will be attempted for speedup.
+    - `"*.mkv,*.avi,*.mp4,*.mp3"`
+        - Only common audio/video files will be tried for put by hash.
+    - `"*.zip,*.gz,*.rar,*.pdf"`
+        - Only common archives or PDF books will be tried for speedup.
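+
+For example, to try put by hash only for disk images and archives, the
+pattern list could be set like this (a hypothetical `rclone.conf` entry,
+building on the `remote` created above):
+
+```
+[remote]
+type = mailru
+user = username@mail.ru
+pass = *** ENCRYPTED ***
+speedup_file_patterns = *.iso,*.zip,*.rar
+```
+
+#### --mailru-speedup-max-disk
+
+This option allows you to disable speedup (put by hash) for large files
+(because preliminary hashing can exhaust your RAM or disk space)
+
+- Config: speedup_max_disk
+- Env Var: RCLONE_MAILRU_SPEEDUP_MAX_DISK
+- Type: SizeSuffix
+- Default: 3G
+- Examples:
+    - "0"
+        - Completely disable speedup (put by hash).
+    - "1G"
+        - Files larger than 1Gb will be uploaded directly.
+    - "3G"
+        - Choose this option if you have less than 3Gb free on local disk.
+
+#### --mailru-speedup-max-memory
+
+Files larger than the size given below will always be hashed on disk.
+
+- Config: speedup_max_memory
+- Env Var: RCLONE_MAILRU_SPEEDUP_MAX_MEMORY
+- Type: SizeSuffix
+- Default: 32M
+- Examples:
+    - "0"
+        - Preliminary hashing will always be done in a temporary disk location.
+    - "32M"
+        - Do not dedicate more than 32Mb RAM for preliminary hashing.
+    - "256M"
+        - You have at most 256Mb RAM free for hash calculations.
+
+#### --mailru-check-hash
+
+What should copy do if the file checksum is mismatched or invalid
+
+- Config: check_hash
+- Env Var: RCLONE_MAILRU_CHECK_HASH
+- Type: bool
+- Default: true
+- Examples:
+    - "true"
+        - Fail with error.
+    - "false"
+        - Ignore and continue.
+
+#### --mailru-user-agent
+
+HTTP user agent used internally by the client.
+Defaults to "rclone/VERSION" or the "--user-agent" value provided on the command line.
+
+- Config: user_agent
+- Env Var: RCLONE_MAILRU_USER_AGENT
+- Type: string
+- Default: ""
+
+#### --mailru-quirks
+
+Comma separated list of internal maintenance flags. This option is intended
+for development purposes. Should not be used by an ordinary user.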
+ +- Config: quirks +- Env Var: RCLONE_MAILRU_QUIRKS +- Type: string +- Default: "" + + diff --git a/docs/content/overview.md b/docs/content/overview.md index 69eff7d91..7f7fe5e89 100644 --- a/docs/content/overview.md +++ b/docs/content/overview.md @@ -31,6 +31,7 @@ Here is an overview of the major features of each cloud storage system. | Hubic | MD5 | Yes | No | No | R/W | | Jottacloud | MD5 | Yes | Yes | No | R/W | | Koofr | MD5 | No | Yes | No | - | +| Mail.ru Cloud | Mailru ‡‡‡ | Yes | Yes | No | - | | Mega | - | No | No | Yes | - | | Microsoft Azure Blob Storage | MD5 | Yes | No | No | R/W | | Microsoft OneDrive | SHA1 ‡‡ | Yes | Yes | No | R | @@ -70,6 +71,8 @@ or `sha1sum` as well as `echo` are in the remote's PATH. for business and SharePoint server support Microsoft's own [QuickXorHash](https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash). +‡‡‡ Mail.ru uses its own modified SHA1 hash + ### ModTime ### The cloud storage system supports setting modification times on @@ -148,6 +151,7 @@ operations more efficient. | HTTP | No | No | No | No | No | No | No | No [#2178](https://github.com/rclone/rclone/issues/2178) | No | Yes | | Hubic | Yes † | Yes | No | No | No | Yes | Yes | No [#2178](https://github.com/rclone/rclone/issues/2178) | Yes | No | | Jottacloud | Yes | Yes | Yes | Yes | No | Yes | No | Yes | Yes | Yes | +| Mail.ru Cloud | Yes | Yes | Yes | Yes | Yes | No | No | Yes | Yes | Yes | | Mega | Yes | No | Yes | Yes | Yes | No | No | No [#2178](https://github.com/rclone/rclone/issues/2178) | Yes | Yes | | Microsoft Azure Blob Storage | Yes | Yes | No | No | No | Yes | No | No [#2178](https://github.com/rclone/rclone/issues/2178) | No | No | | Microsoft OneDrive | Yes | Yes | Yes | Yes | No [#575](https://github.com/rclone/rclone/issues/575) | No | No | Yes | Yes | Yes | diff --git a/docs/layouts/chrome/navbar.html b/docs/layouts/chrome/navbar.html index 461ac40a1..ea0d527af 100644 --- a/docs/layouts/chrome/navbar.html +++ b/docs/layouts/chrome/navbar.html @@ -73,6 +73,7 @@
  • Hubic
  • Jottacloud
  • Koofr
+  • Mail.ru Cloud
  • Mega
  • Microsoft Azure Blob Storage
  • Microsoft OneDrive
diff --git a/fstest/test_all/config.yaml b/fstest/test_all/config.yaml
index d6d2ef5af..b51c7a1be 100644
--- a/fstest/test_all/config.yaml
+++ b/fstest/test_all/config.yaml
@@ -130,3 +130,7 @@ backends:
    remote: "TestPutio:"
    subdir: false
    fastlist: false
+ - backend: "mailru"
+   remote: "TestMailru:"
+   subdir: false
+   fastlist: false