b2: make all operations work from the root #3421

Nick Craig-Wood 2019-08-09 15:19:02 +01:00
parent eaaf2ded94
commit e0e0e0c7bd
2 changed files with 299 additions and 252 deletions

View File

@@ -14,7 +14,6 @@ import (
 	"io"
 	"net/http"
 	"path"
-	"regexp"
 	"strconv"
 	"strings"
 	"sync"
@ -30,6 +29,7 @@ import (
"github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk" "github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest" "github.com/rclone/rclone/lib/rest"
) )
@@ -164,24 +164,24 @@ type Options struct {
 // Fs represents a remote b2 server
 type Fs struct {
 	name            string       // name of this remote
 	root            string       // the path we are working on if any
 	opt             Options      // parsed config options
 	features        *fs.Features // optional features
 	srv             *rest.Client // the connection to the b2 server
-	bucket          string       // the bucket we are working on
-	bucketOKMu      sync.Mutex   // mutex to protect bucket OK
-	bucketOK        bool         // true if we have created the bucket
+	rootBucket      string        // bucket part of root (if any)
+	rootDirectory   string        // directory part of root (if any)
+	cache           *bucket.Cache // cache for bucket creation status
 	bucketIDMutex   sync.Mutex        // mutex to protect _bucketID
-	_bucketID       string            // the ID of the bucket we are working on
+	_bucketID       map[string]string // the ID of the bucket we are working on
 	bucketTypeMutex sync.Mutex        // mutex to protect _bucketType
-	_bucketType     string            // the Type of the bucket we are working on
+	_bucketType     map[string]string // the Type of the bucket we are working on
 	info            api.AuthorizeAccountResponse // result of authorize call
 	uploadMu        sync.Mutex                   // lock for upload variable
-	uploads         []*api.GetUploadURLResponse  // result of get upload URL calls
+	uploads         map[string][]*api.GetUploadURLResponse // upload URLs by bucketID
 	authMu          sync.Mutex  // lock for authorizing the account
 	pacer           *fs.Pacer   // To pace and retry the API calls
 	bufferTokens    chan []byte // control concurrency of multipart uploads
 }
 
 // Object describes a b2 object
@@ -204,18 +204,18 @@ func (f *Fs) Name() string {
 // Root of the remote (as passed into NewFs)
 func (f *Fs) Root() string {
-	if f.root == "" {
-		return f.bucket
-	}
-	return f.bucket + "/" + f.root
+	return f.root
 }
 
 // String converts this Fs to a string
func (f *Fs) String() string {
-	if f.root == "" {
-		return fmt.Sprintf("B2 bucket %s", f.bucket)
+	if f.rootBucket == "" {
+		return fmt.Sprintf("B2 root")
 	}
-	return fmt.Sprintf("B2 bucket %s path %s", f.bucket, f.root)
+	if f.rootDirectory == "" {
+		return fmt.Sprintf("B2 bucket %s", f.rootBucket)
+	}
+	return fmt.Sprintf("B2 bucket %s path %s", f.rootBucket, f.rootDirectory)
 }
 
 // Features returns the optional features of this Fs
@@ -223,21 +223,23 @@ func (f *Fs) Features() *fs.Features {
 	return f.features
 }
 
-// Pattern to match a b2 path
-var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)
-
-// parseParse parses a b2 'url'
-func parsePath(path string) (bucket, directory string, err error) {
-	parts := matcher.FindStringSubmatch(path)
-	if parts == nil {
-		err = errors.Errorf("couldn't find bucket in b2 path %q", path)
-	} else {
-		bucket, directory = parts[1], parts[2]
-		directory = strings.Trim(directory, "/")
-	}
+// parsePath parses a remote 'url'
+func parsePath(path string) (root string) {
+	root = strings.Trim(path, "/")
 	return
 }
 
+// split returns bucket and bucketPath from the rootRelativePath
+// relative to f.root
+func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
+	return bucket.Split(path.Join(f.root, rootRelativePath))
+}
+
+// split returns bucket and bucketPath from the object
+func (o *Object) split() (bucket, bucketPath string) {
+	return o.fs.split(o.remote)
+}
+
 // retryErrorCodes is a slice of error codes that we will retry
 var retryErrorCodes = []int{
 	401, // Unauthorized (eg "Token has expired")
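
The new parsePath/split helpers above are the heart of this change: the Fs now keeps one combined root, and every operation derives its (bucket, bucketPath) pair on demand, so a remote may be rooted above bucket level. A minimal, self-contained sketch of that splitting behaviour; splitBucket here is an invented stand-in for rclone's lib/bucket.Split, which the code above actually calls:

package main

import (
	"fmt"
	"path"
	"strings"
)

// parsePath trims leading and trailing slashes, as in the diff above.
func parsePath(p string) string { return strings.Trim(p, "/") }

// splitBucket cuts an absolute path into bucket and bucket-relative path
// at the first "/" (stand-in for lib/bucket.Split).
func splitBucket(absPath string) (bucketName, bucketPath string) {
	if i := strings.IndexByte(absPath, '/'); i >= 0 {
		return absPath[:i], absPath[i+1:]
	}
	return absPath, ""
}

func main() {
	// Fs rooted inside a bucket: remote "file.txt" resolves into that bucket.
	root := parsePath("/mybucket/some/dir/")
	b, p := splitBucket(path.Join(root, "file.txt"))
	fmt.Println(b, p) // mybucket some/dir/file.txt

	// Fs rooted at the account ("B2 root"): the first segment of each remote
	// names the bucket, which is what lets operations work from the root.
	b, p = splitBucket(path.Join("", "otherbucket/obj"))
	fmt.Println(b, p) // otherbucket obj
}
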
@@ -335,6 +337,12 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
 	return
 }
 
+// setRoot changes the root of the Fs
+func (f *Fs) setRoot(root string) {
+	f.root = parsePath(root)
+	f.rootBucket, f.rootDirectory = bucket.Split(f.root)
+}
+
 // NewFs constructs an Fs from the path, bucket:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	ctx := context.Background()
@ -352,10 +360,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil { if err != nil {
return nil, errors.Wrap(err, "b2: chunk size") return nil, errors.Wrap(err, "b2: chunk size")
} }
bucket, directory, err := parsePath(root)
if err != nil {
return nil, err
}
if opt.Account == "" { if opt.Account == "" {
return nil, errors.New("account not found") return nil, errors.New("account not found")
} }
@@ -366,17 +370,21 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		opt.Endpoint = defaultEndpoint
 	}
 	f := &Fs{
 		name: name,
 		opt:  *opt,
-		bucket: bucket,
-		root:   directory,
-		srv:    rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
-		pacer:  fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		srv:         rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
+		cache:       bucket.NewCache(),
+		_bucketID:   make(map[string]string, 1),
+		_bucketType: make(map[string]string, 1),
+		uploads:     make(map[string][]*api.GetUploadURLResponse),
+		pacer:       fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 	}
+	f.setRoot(root)
 	f.features = (&fs.Features{
 		ReadMimeType:  true,
 		WriteMimeType: true,
 		BucketBased:   true,
+		BucketBasedRootOK: true,
 	}).Fill(f)
 	// Set the test flag if required
 	if opt.TestMode != "" {
@@ -390,33 +398,27 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		return nil, errors.Wrap(err, "failed to authorize account")
 	}
 	// If this is a key limited to a single bucket, it must exist already
-	if f.bucket != "" && f.info.Allowed.BucketID != "" {
+	if f.rootBucket != "" && f.info.Allowed.BucketID != "" {
 		allowedBucket := f.info.Allowed.BucketName
 		if allowedBucket == "" {
 			return nil, errors.New("bucket that application key is restricted to no longer exists")
 		}
-		if allowedBucket != f.bucket {
+		if allowedBucket != f.rootBucket {
 			return nil, errors.Errorf("you must use bucket %q with this application key", allowedBucket)
 		}
-		f.markBucketOK()
-		f.setBucketID(f.info.Allowed.BucketID)
+		f.cache.MarkOK(f.rootBucket)
+		f.setBucketID(f.rootBucket, f.info.Allowed.BucketID)
 	}
-	if f.root != "" {
-		f.root += "/"
+	if f.rootBucket != "" && f.rootDirectory != "" {
 		// Check to see if the (bucket,directory) is actually an existing file
 		oldRoot := f.root
-		remote := path.Base(directory)
-		f.root = path.Dir(directory)
-		if f.root == "." {
-			f.root = ""
-		} else {
-			f.root += "/"
-		}
-		_, err := f.NewObject(ctx, remote)
+		newRoot, leaf := path.Split(oldRoot)
+		f.setRoot(newRoot)
+		_, err := f.NewObject(ctx, leaf)
 		if err != nil {
 			if err == fs.ErrorObjectNotFound {
 				// File doesn't exist so return old f
-				f.root = oldRoot
+				f.setRoot(oldRoot)
 				return f, nil
 			}
 			return nil, err
@@ -464,30 +466,34 @@ func (f *Fs) hasPermission(permission string) bool {
 // getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
 //
 // This should be returned with returnUploadURL when finished
-func (f *Fs) getUploadURL() (upload *api.GetUploadURLResponse, err error) {
+func (f *Fs) getUploadURL(bucket string) (upload *api.GetUploadURLResponse, err error) {
 	f.uploadMu.Lock()
 	defer f.uploadMu.Unlock()
-	bucketID, err := f.getBucketID()
+	bucketID, err := f.getBucketID(bucket)
 	if err != nil {
 		return nil, err
 	}
-	if len(f.uploads) == 0 {
-		opts := rest.Opts{
-			Method: "POST",
-			Path:   "/b2_get_upload_url",
-		}
-		var request = api.GetUploadURLRequest{
-			BucketID: bucketID,
-		}
-		err := f.pacer.Call(func() (bool, error) {
-			resp, err := f.srv.CallJSON(&opts, &request, &upload)
-			return f.shouldRetry(resp, err)
-		})
-		if err != nil {
-			return nil, errors.Wrap(err, "failed to get upload URL")
-		}
-	} else {
-		upload, f.uploads = f.uploads[0], f.uploads[1:]
+	// look for a stored upload URL for the correct bucketID
+	uploads := f.uploads[bucketID]
+	if len(uploads) > 0 {
+		upload, uploads = uploads[0], uploads[1:]
+		f.uploads[bucketID] = uploads
+		return upload, nil
+	}
+	// get a new upload URL since not found
+	opts := rest.Opts{
+		Method: "POST",
+		Path:   "/b2_get_upload_url",
+	}
+	var request = api.GetUploadURLRequest{
+		BucketID: bucketID,
+	}
+	err = f.pacer.Call(func() (bool, error) {
+		resp, err := f.srv.CallJSON(&opts, &request, &upload)
+		return f.shouldRetry(resp, err)
+	})
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to get upload URL")
 	}
 	return upload, nil
 }
@@ -498,14 +504,14 @@ func (f *Fs) returnUploadURL(upload *api.GetUploadURLResponse) {
 		return
 	}
 	f.uploadMu.Lock()
-	f.uploads = append(f.uploads, upload)
+	f.uploads[upload.BucketID] = append(f.uploads[upload.BucketID], upload)
 	f.uploadMu.Unlock()
 }
 
 // clearUploadURL clears the current UploadURL and the AuthorizationToken
-func (f *Fs) clearUploadURL() {
+func (f *Fs) clearUploadURL(bucketID string) {
 	f.uploadMu.Lock()
-	f.uploads = nil
+	delete(f.uploads, bucketID)
 	f.uploadMu.Unlock()
 }
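
getUploadURL, returnUploadURL and clearUploadURL above turn the single upload-URL pool into one pool per bucket ID. A rough sketch of that pattern with simplified, invented types (uploadURL stands in for api.GetUploadURLResponse; the URL is an invented example); it mirrors the logic above rather than the exact rclone API:

package main

import (
	"fmt"
	"sync"
)

// uploadURL is a simplified stand-in for api.GetUploadURLResponse.
type uploadURL struct {
	BucketID string
	URL      string
}

// pool caches upload URLs per bucket ID and fetches a new one only when
// that bucket's slice is empty.
type pool struct {
	mu      sync.Mutex
	uploads map[string][]*uploadURL
}

func (p *pool) get(bucketID string, fetch func() *uploadURL) *uploadURL {
	p.mu.Lock()
	defer p.mu.Unlock()
	if us := p.uploads[bucketID]; len(us) > 0 {
		u := us[0]
		p.uploads[bucketID] = us[1:]
		return u // reuse a stored URL for this bucket
	}
	return fetch() // real code: POST /b2_get_upload_url via the pacer
}

func (p *pool) put(u *uploadURL) {
	p.mu.Lock()
	p.uploads[u.BucketID] = append(p.uploads[u.BucketID], u)
	p.mu.Unlock()
}

func (p *pool) clear(bucketID string) {
	p.mu.Lock()
	delete(p.uploads, bucketID) // e.g. after the bucket is deleted
	p.mu.Unlock()
}

func main() {
	p := &pool{uploads: make(map[string][]*uploadURL)}
	fetch := func() *uploadURL { return &uploadURL{"bkt1", "https://pod.example/upload"} }
	u := p.get("bkt1", fetch)                      // pool empty: fetches
	p.put(u)                                       // upload finished: hand the URL back
	fmt.Println(p.get("bkt1", fetch).URL == u.URL) // true: reused, no second fetch
	p.clear("bkt1")
}
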
@ -575,27 +581,35 @@ var errEndList = errors.New("end list")
// list lists the objects into the function supplied from // list lists the objects into the function supplied from
// the bucket and root supplied // the bucket and root supplied
// //
// dir is the starting directory, "" for root // (bucket, directory) is the starting directory
// //
// level is the depth to search to // If prefix is set then it is removed from all file names
// //
// If prefix is set then startFileName is used as a prefix which all // If addBucket is set then it adds the bucket to the start of the
// files must have // remotes generated
//
// If recurse is set the function will recursively list
// //
// If limit is > 0 then it limits to that many files (must be less // If limit is > 0 then it limits to that many files (must be less
// than 1000) // than 1000)
// //
// If hidden is set then it will list the hidden (deleted) files too. // If hidden is set then it will list the hidden (deleted) files too.
func (f *Fs) list(ctx context.Context, dir string, recurse bool, prefix string, limit int, hidden bool, fn listFn) error { //
root := f.root // if findFile is set it will look for files called (bucket, directory)
if dir != "" { func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, limit int, hidden bool, findFile bool, fn listFn) error {
root += dir + "/" if !findFile {
if prefix != "" {
prefix += "/"
}
if directory != "" {
directory += "/"
}
} }
delimiter := "" delimiter := ""
if !recurse { if !recurse {
delimiter = "/" delimiter = "/"
} }
bucketID, err := f.getBucketID() bucketID, err := f.getBucketID(bucket)
if err != nil { if err != nil {
return err return err
} }
@@ -606,12 +620,11 @@ func (f *Fs) list(ctx context.Context, dir string, recurse bool, prefix string, limit int, hidden bool, fn listFn) error {
 	var request = api.ListFileNamesRequest{
 		BucketID:     bucketID,
 		MaxFileCount: chunkSize,
-		Prefix:       root,
+		Prefix:       directory,
 		Delimiter:    delimiter,
 	}
-	prefix = root + prefix
-	if prefix != "" {
-		request.StartFileName = prefix
+	if directory != "" {
+		request.StartFileName = directory
 	}
 	opts := rest.Opts{
 		Method: "POST",
@@ -635,16 +648,19 @@ func (f *Fs) list(ctx context.Context, dir string, recurse bool, prefix string, limit int, hidden bool, fn listFn) error {
 		if prefix != "" && !strings.HasPrefix(file.Name, prefix) {
 			return nil
 		}
-		if !strings.HasPrefix(file.Name, f.root) {
+		if !strings.HasPrefix(file.Name, prefix) {
 			fs.Debugf(f, "Odd name received %q", file.Name)
 			continue
 		}
-		remote := file.Name[len(f.root):]
+		remote := file.Name[len(prefix):]
 		// Check for directory
 		isDirectory := strings.HasSuffix(remote, "/")
 		if isDirectory {
 			remote = remote[:len(remote)-1]
 		}
+		if addBucket {
+			remote = path.Join(bucket, remote)
+		}
 		// Send object
 		err = fn(remote, file, isDirectory)
 		if err != nil {
@@ -688,19 +704,10 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *api.File, isDirectory bool, last *string) (fs.DirEntry, error) {
 	return o, nil
 }
 
-// mark the bucket as being OK
-func (f *Fs) markBucketOK() {
-	if f.bucket != "" {
-		f.bucketOKMu.Lock()
-		f.bucketOK = true
-		f.bucketOKMu.Unlock()
-	}
-}
-
 // listDir lists a single directory
-func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
 	last := ""
-	err = f.list(ctx, dir, false, "", 0, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
+	err = f.list(ctx, bucket, directory, prefix, f.rootBucket == "", false, 0, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {
 		entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last)
 		if err != nil {
 			return err
@@ -714,7 +721,7 @@ func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 		return nil, err
 	}
 	// bucket must be present if listing succeeded
-	f.markBucketOK()
+	f.cache.MarkOK(bucket)
 	return entries, nil
 }
@ -744,10 +751,11 @@ func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
// This should return ErrDirNotFound if the directory isn't // This should return ErrDirNotFound if the directory isn't
// found. // found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
if f.bucket == "" { bucket, directory := f.split(dir)
return f.listBuckets(dir) if bucket == "" {
return f.listBuckets(directory)
} }
return f.listDir(ctx, dir) return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
} }
// ListR lists the objects and directories of the Fs starting // ListR lists the objects and directories of the Fs starting
@@ -767,23 +775,42 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 // Don't implement this unless you have a more efficient way
 // of listing recursively than doing a directory traversal.
 func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
-	if f.bucket == "" {
-		return fs.ErrorListBucketRequired
-	}
+	bucket, directory := f.split(dir)
 	list := walk.NewListRHelper(callback)
-	last := ""
-	err = f.list(ctx, dir, true, "", 0, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
-		entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last)
-		if err != nil {
-			return err
-		}
-		return list.Add(entry)
-	})
-	if err != nil {
-		return err
+	listR := func(bucket, directory, prefix string, addBucket bool) error {
+		last := ""
+		return f.list(ctx, bucket, directory, prefix, addBucket, true, 0, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {
+			entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last)
+			if err != nil {
+				return err
+			}
+			return list.Add(entry)
+		})
+	}
+	if bucket == "" {
+		entries, err := f.listBuckets("")
+		if err != nil {
+			return err
+		}
+		for _, entry := range entries {
+			err = list.Add(entry)
+			if err != nil {
+				return err
+			}
+			bucket := entry.Remote()
+			err = listR(bucket, "", f.rootDirectory, true)
+			if err != nil {
+				return err
+			}
+		}
+	} else {
+		err = listR(bucket, directory, f.rootDirectory, f.rootBucket == "")
+		if err != nil {
+			return err
+		}
 	}
 	// bucket must be present if listing succeeded
-	f.markBucketOK()
+	f.cache.MarkOK(bucket)
 	return list.Flush()
 }
@@ -809,8 +836,21 @@ func (f *Fs) listBucketsToFn(fn listBucketFn) error {
 	if err != nil {
 		return err
 	}
+	f.bucketIDMutex.Lock()
+	f.bucketTypeMutex.Lock()
+	f._bucketID = make(map[string]string, 1)
+	f._bucketType = make(map[string]string, 1)
 	for i := range response.Buckets {
-		err = fn(&response.Buckets[i])
+		bucket := &response.Buckets[i]
+		f.cache.MarkOK(bucket.Name)
+		f._bucketID[bucket.Name] = bucket.ID
+		f._bucketType[bucket.Name] = bucket.Type
+	}
+	f.bucketTypeMutex.Unlock()
+	f.bucketIDMutex.Unlock()
+	for i := range response.Buckets {
+		bucket := &response.Buckets[i]
+		err = fn(bucket)
 		if err != nil {
 			return err
 		}
@@ -820,72 +860,72 @@ func (f *Fs) listBucketsToFn(fn listBucketFn) error {
 // getbucketType finds the bucketType for the current bucket name
 // can be one of allPublic, allPrivate, or snapshot
-func (f *Fs) getbucketType() (bucketType string, err error) {
+func (f *Fs) getbucketType(bucket string) (bucketType string, err error) {
 	f.bucketTypeMutex.Lock()
-	defer f.bucketTypeMutex.Unlock()
-	if f._bucketType != "" {
-		return f._bucketType, nil
+	bucketType = f._bucketType[bucket]
+	f.bucketTypeMutex.Unlock()
+	if bucketType != "" {
+		return bucketType, nil
 	}
 	err = f.listBucketsToFn(func(bucket *api.Bucket) error {
-		if bucket.Name == f.bucket {
-			bucketType = bucket.Type
-		}
+		// listBucketsToFn reads bucket Types
 		return nil
 	})
+	f.bucketTypeMutex.Lock()
+	bucketType = f._bucketType[bucket]
+	f.bucketTypeMutex.Unlock()
 	if bucketType == "" {
 		err = fs.ErrorDirNotFound
 	}
-	f._bucketType = bucketType
 	return bucketType, err
 }
 
 // setBucketType sets the Type for the current bucket name
-func (f *Fs) setBucketType(Type string) {
+func (f *Fs) setBucketType(bucket string, Type string) {
 	f.bucketTypeMutex.Lock()
-	f._bucketType = Type
+	f._bucketType[bucket] = Type
 	f.bucketTypeMutex.Unlock()
 }
 
 // clearBucketType clears the Type for the current bucket name
-func (f *Fs) clearBucketType() {
+func (f *Fs) clearBucketType(bucket string) {
 	f.bucketTypeMutex.Lock()
-	f._bucketType = ""
+	delete(f._bucketType, bucket)
 	f.bucketTypeMutex.Unlock()
 }
 
 // getBucketID finds the ID for the current bucket name
-func (f *Fs) getBucketID() (bucketID string, err error) {
+func (f *Fs) getBucketID(bucket string) (bucketID string, err error) {
 	f.bucketIDMutex.Lock()
-	defer f.bucketIDMutex.Unlock()
-	if f._bucketID != "" {
-		return f._bucketID, nil
+	bucketID = f._bucketID[bucket]
+	f.bucketIDMutex.Unlock()
+	if bucketID != "" {
+		return bucketID, nil
 	}
 	err = f.listBucketsToFn(func(bucket *api.Bucket) error {
-		if bucket.Name == f.bucket {
-			bucketID = bucket.ID
-		}
+		// listBucketsToFn sets IDs
 		return nil
 	})
+	f.bucketIDMutex.Lock()
+	bucketID = f._bucketID[bucket]
+	f.bucketIDMutex.Unlock()
 	if bucketID == "" {
 		err = fs.ErrorDirNotFound
 	}
-	f._bucketID = bucketID
 	return bucketID, err
 }
 
 // setBucketID sets the ID for the current bucket name
-func (f *Fs) setBucketID(ID string) {
+func (f *Fs) setBucketID(bucket, ID string) {
 	f.bucketIDMutex.Lock()
-	f._bucketID = ID
+	f._bucketID[bucket] = ID
 	f.bucketIDMutex.Unlock()
 }
 
 // clearBucketID clears the ID for the current bucket name
-func (f *Fs) clearBucketID() {
+func (f *Fs) clearBucketID(bucket string) {
 	f.bucketIDMutex.Lock()
-	f._bucketID = ""
+	delete(f._bucketID, bucket)
 	f.bucketIDMutex.Unlock()
 }
@@ -910,83 +950,79 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 // Mkdir creates the bucket if it doesn't exist
 func (f *Fs) Mkdir(ctx context.Context, dir string) error {
-	f.bucketOKMu.Lock()
-	defer f.bucketOKMu.Unlock()
-	if f.bucketOK {
-		return nil
-	}
-	opts := rest.Opts{
-		Method: "POST",
-		Path:   "/b2_create_bucket",
-	}
-	var request = api.CreateBucketRequest{
-		AccountID: f.info.AccountID,
-		Name:      f.bucket,
-		Type:      "allPrivate",
-	}
-	var response api.Bucket
-	err := f.pacer.Call(func() (bool, error) {
-		resp, err := f.srv.CallJSON(&opts, &request, &response)
-		return f.shouldRetry(resp, err)
-	})
-	if err != nil {
-		if apiErr, ok := err.(*api.Error); ok {
-			if apiErr.Code == "duplicate_bucket_name" {
-				// Check this is our bucket - buckets are globally unique and this
-				// might be someone elses.
-				_, getBucketErr := f.getBucketID()
-				if getBucketErr == nil {
-					// found so it is our bucket
-					f.bucketOK = true
-					return nil
-				}
-				if getBucketErr != fs.ErrorDirNotFound {
-					fs.Debugf(f, "Error checking bucket exists: %v", getBucketErr)
+	bucket, _ := f.split(dir)
+	return f.cache.Create(bucket, func() error {
+		opts := rest.Opts{
+			Method: "POST",
+			Path:   "/b2_create_bucket",
+		}
+		var request = api.CreateBucketRequest{
+			AccountID: f.info.AccountID,
+			Name:      bucket,
+			Type:      "allPrivate",
+		}
+		var response api.Bucket
+		err := f.pacer.Call(func() (bool, error) {
+			resp, err := f.srv.CallJSON(&opts, &request, &response)
+			return f.shouldRetry(resp, err)
+		})
+		if err != nil {
+			if apiErr, ok := err.(*api.Error); ok {
+				if apiErr.Code == "duplicate_bucket_name" {
+					// Check this is our bucket - buckets are globally unique and this
+					// might be someone elses.
+					_, getBucketErr := f.getBucketID(bucket)
+					if getBucketErr == nil {
+						// found so it is our bucket
+						return nil
+					}
+					if getBucketErr != fs.ErrorDirNotFound {
+						fs.Debugf(f, "Error checking bucket exists: %v", getBucketErr)
+					}
 				}
 			}
+			return errors.Wrap(err, "failed to create bucket")
 		}
-		return errors.Wrap(err, "failed to create bucket")
-	}
-	f.setBucketID(response.ID)
-	f.setBucketType(response.Type)
-	f.bucketOK = true
-	return nil
+		f.setBucketID(bucket, response.ID)
+		f.setBucketType(bucket, response.Type)
+		return nil
+	}, nil)
 }
 
 // Rmdir deletes the bucket if the fs is at the root
 //
 // Returns an error if it isn't empty
 func (f *Fs) Rmdir(ctx context.Context, dir string) error {
-	f.bucketOKMu.Lock()
-	defer f.bucketOKMu.Unlock()
-	if f.root != "" || dir != "" {
+	bucket, directory := f.split(dir)
+	if bucket == "" || directory != "" {
 		return nil
 	}
-	opts := rest.Opts{
-		Method: "POST",
-		Path:   "/b2_delete_bucket",
-	}
-	bucketID, err := f.getBucketID()
-	if err != nil {
-		return err
-	}
-	var request = api.DeleteBucketRequest{
-		ID:        bucketID,
-		AccountID: f.info.AccountID,
-	}
-	var response api.Bucket
-	err = f.pacer.Call(func() (bool, error) {
-		resp, err := f.srv.CallJSON(&opts, &request, &response)
-		return f.shouldRetry(resp, err)
-	})
-	if err != nil {
-		return errors.Wrap(err, "failed to delete bucket")
-	}
-	f.bucketOK = false
-	f.clearBucketID()
-	f.clearBucketType()
-	f.clearUploadURL()
-	return nil
+	return f.cache.Remove(bucket, func() error {
+		opts := rest.Opts{
+			Method: "POST",
+			Path:   "/b2_delete_bucket",
+		}
+		bucketID, err := f.getBucketID(bucket)
+		if err != nil {
+			return err
+		}
+		var request = api.DeleteBucketRequest{
+			ID:        bucketID,
+			AccountID: f.info.AccountID,
+		}
+		var response api.Bucket
+		err = f.pacer.Call(func() (bool, error) {
+			resp, err := f.srv.CallJSON(&opts, &request, &response)
+			return f.shouldRetry(resp, err)
+		})
+		if err != nil {
+			return errors.Wrap(err, "failed to delete bucket")
+		}
+		f.clearBucketID(bucket)
+		f.clearBucketType(bucket)
+		f.clearUploadURL(bucketID)
+		return nil
+	})
 }
 
 // Precision of the remote
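
Mkdir and Rmdir above now defer to a shared bucket-creation cache instead of a single bucketOK flag: Create runs the create callback only when the bucket isn't already marked OK, and Remove un-marks it after the delete callback succeeds. This is a rough, assumption-laden sketch of that behaviour, not the real lib/bucket.Cache API (whose Create, as the `}, nil)` above shows, also accepts a third callback, passed as nil here):

package main

import (
	"fmt"
	"sync"
)

// createCache remembers which buckets are known to exist so the create
// callback runs at most once per bucket.
type createCache struct {
	mu sync.Mutex
	ok map[string]bool
}

func (c *createCache) create(bucketName string, create func() error) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.ok[bucketName] {
		return nil // already created, or marked OK by a successful listing
	}
	if err := create(); err != nil {
		return err
	}
	c.ok[bucketName] = true
	return nil
}

func (c *createCache) remove(bucketName string, remove func() error) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if err := remove(); err != nil {
		return err
	}
	delete(c.ok, bucketName) // the next Mkdir must create it again
	return nil
}

func main() {
	c := &createCache{ok: make(map[string]bool)}
	mk := func() error { fmt.Println("POST /b2_create_bucket"); return nil }
	_ = c.create("mybucket", mk)
	_ = c.create("mybucket", mk) // cached: no second API call
	_ = c.remove("mybucket", func() error { fmt.Println("POST /b2_delete_bucket"); return nil })
}
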
@@ -995,8 +1031,8 @@ func (f *Fs) Precision() time.Duration {
 }
 
 // hide hides a file on the remote
-func (f *Fs) hide(Name string) error {
-	bucketID, err := f.getBucketID()
+func (f *Fs) hide(bucket, bucketPath string) error {
+	bucketID, err := f.getBucketID(bucket)
 	if err != nil {
 		return err
 	}
@@ -1006,7 +1042,7 @@ func (f *Fs) hide(Name string) error {
 	}
 	var request = api.HideFileRequest{
 		BucketID: bucketID,
-		Name:     Name,
+		Name:     bucketPath,
 	}
 	var response api.File
 	err = f.pacer.Call(func() (bool, error) {
@@ -1021,7 +1057,7 @@ func (f *Fs) hide(Name string) error {
 				return nil
 			}
 		}
-		return errors.Wrapf(err, "failed to hide %q", Name)
+		return errors.Wrapf(err, "failed to hide %q", bucketPath)
 	}
 	return nil
 }
@@ -1052,7 +1088,10 @@ func (f *Fs) deleteByID(ID, Name string) error {
 // if oldOnly is true then it deletes only non current files.
 //
 // Implemented here so we can make sure we delete old versions.
-func (f *Fs) purge(ctx context.Context, oldOnly bool) error {
+func (f *Fs) purge(ctx context.Context, bucket, directory string, oldOnly bool) error {
+	if bucket == "" {
+		return errors.New("can't purge from root")
+	}
 	var errReturn error
 	var checkErrMutex sync.Mutex
 	var checkErr = func(err error) {
@@ -1093,7 +1132,7 @@ func (f *Fs) purge(ctx context.Context, oldOnly bool) error {
 		}()
 	}
 	last := ""
-	checkErr(f.list(ctx, "", true, "", 0, true, func(remote string, object *api.File, isDirectory bool) error {
+	checkErr(f.list(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", true, 0, true, false, func(remote string, object *api.File, isDirectory bool) error {
 		if !isDirectory {
 			oi, err := f.newObjectWithInfo(ctx, object.Name, object)
 			if err != nil {
@@ -1101,6 +1140,7 @@ func (f *Fs) purge(ctx context.Context, oldOnly bool) error {
 			}
 			tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
 			if oldOnly && last != remote {
+				// Check current version of the file
 				if object.Action == "hide" {
 					fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
 					toBeDeleted <- object
@@ -1130,12 +1170,12 @@ func (f *Fs) purge(ctx context.Context, oldOnly bool) error {
 // Purge deletes all the files and directories including the old versions.
 func (f *Fs) Purge(ctx context.Context) error {
-	return f.purge(ctx, false)
+	return f.purge(ctx, f.rootBucket, f.rootDirectory, false)
 }
 
 // CleanUp deletes all the hidden files.
 func (f *Fs) CleanUp(ctx context.Context) error {
-	return f.purge(ctx, true)
+	return f.purge(ctx, f.rootBucket, f.rootDirectory, true)
 }
 
 // Copy src to this remote using server side copy operations.
@@ -1148,6 +1188,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
 //
 // If it isn't possible then return fs.ErrorCantCopy
 func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+	dstBucket, dstPath := f.split(remote)
 	err := f.Mkdir(ctx, "")
 	if err != nil {
 		return nil, err
@@ -1157,7 +1198,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 		fs.Debugf(src, "Can't copy - not same remote type")
 		return nil, fs.ErrorCantCopy
 	}
-	destBucketID, err := f.getBucketID()
+	destBucketID, err := f.getBucketID(dstBucket)
 	if err != nil {
 		return nil, err
 	}
@@ -1167,7 +1208,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 	}
 	var request = api.CopyFileRequest{
 		SourceID:          srcObj.id,
-		Name:              f.root + remote,
+		Name:              dstPath,
 		MetadataDirective: "COPY",
 		DestBucketID:      destBucketID,
 	}
@@ -1196,8 +1237,8 @@ func (f *Fs) Hashes() hash.Set {
 }
 
 // getDownloadAuthorization returns authorization token for downloading
-// without accout.
-func (f *Fs) getDownloadAuthorization(remote string) (authorization string, err error) {
+// without account.
+func (f *Fs) getDownloadAuthorization(bucket, remote string) (authorization string, err error) {
 	validDurationInSeconds := time.Duration(f.opt.DownloadAuthorizationDuration).Nanoseconds() / 1e9
 	if validDurationInSeconds <= 0 || validDurationInSeconds > 604800 {
 		return "", errors.New("--b2-download-auth-duration must be between 1 sec and 1 week")
@ -1205,7 +1246,7 @@ func (f *Fs) getDownloadAuthorization(remote string) (authorization string, err
if !f.hasPermission("shareFiles") { if !f.hasPermission("shareFiles") {
return "", errors.New("sharing a file link requires the shareFiles permission") return "", errors.New("sharing a file link requires the shareFiles permission")
} }
bucketID, err := f.getBucketID() bucketID, err := f.getBucketID(bucket)
if err != nil { if err != nil {
return "", err return "", err
} }
@@ -1229,8 +1270,9 @@ func (f *Fs) getDownloadAuthorization(remote string) (authorization string, err error) {
 	return response.AuthorizationToken, nil
 }
 
-// PublicLink returns a link for downloading without accout.
+// PublicLink returns a link for downloading without account
 func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
+	bucket, bucketPath := f.split(remote)
 	var RootURL string
 	if f.opt.DownloadURL == "" {
 		RootURL = f.info.DownloadURL
@@ -1239,7 +1281,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
 	}
 	_, err = f.NewObject(ctx, remote)
 	if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
-		err2 := f.list(ctx, remote, false, "", 1, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
+		err2 := f.list(ctx, bucket, bucketPath, f.rootDirectory, f.rootBucket == "", false, 1, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {
 			err = nil
 			return nil
 		})
@@ -1250,14 +1292,14 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
 	if err != nil {
 		return "", err
 	}
-	absPath := "/" + path.Join(f.root, remote)
-	link = RootURL + "/file/" + urlEncode(f.bucket) + absPath
-	bucketType, err := f.getbucketType()
+	absPath := "/" + bucketPath
+	link = RootURL + "/file/" + urlEncode(bucket) + absPath
+	bucketType, err := f.getbucketType(bucket)
 	if err != nil {
 		return "", err
 	}
 	if bucketType == "allPrivate" || bucketType == "snapshot" {
-		AuthorizationToken, err := f.getDownloadAuthorization(remote)
+		AuthorizationToken, err := f.getDownloadAuthorization(bucket, remote)
 		if err != nil {
 			return "", err
 		}
@@ -1351,19 +1393,19 @@ func (o *Object) decodeMetaDataFileInfo(info *api.FileInfo) (err error) {
 // getMetaData gets the metadata from the object unconditionally
 func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
+	bucket, bucketPath := o.split()
 	maxSearched := 1
 	var timestamp api.Timestamp
-	baseRemote := o.remote
 	if o.fs.opt.Versions {
-		timestamp, baseRemote = api.RemoveVersion(baseRemote)
+		timestamp, bucketPath = api.RemoveVersion(bucketPath)
 		maxSearched = maxVersions
 	}
-	err = o.fs.list(ctx, "", true, baseRemote, maxSearched, o.fs.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
+	err = o.fs.list(ctx, bucket, bucketPath, "", false, true, maxSearched, o.fs.opt.Versions, true, func(remote string, object *api.File, isDirectory bool) error {
 		if isDirectory {
 			return nil
 		}
-		if remote == baseRemote {
+		if remote == bucketPath {
 			if !timestamp.IsZero() && !timestamp.Equal(object.UploadTimestamp) {
 				return nil
 			}
@@ -1441,6 +1483,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 	if err != nil {
 		return err
 	}
+	_, bucketPath := o.split()
 	info.Info[timeKey] = timeString(modTime)
 	opts := rest.Opts{
 		Method: "POST",
@@ -1448,7 +1491,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 	}
 	var request = api.CopyFileRequest{
 		SourceID:          o.id,
-		Name:              o.fs.root + o.remote, // copy to same name
+		Name:              bucketPath, // copy to same name
 		MetadataDirective: "REPLACE",
 		ContentType:       info.ContentType,
 		Info:              info.Info,
@@ -1549,7 +1592,8 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	if o.id != "" {
 		opts.Path += "/b2api/v1/b2_download_file_by_id?fileId=" + urlEncode(o.id)
 	} else {
-		opts.Path += "/file/" + urlEncode(o.fs.bucket) + "/" + urlEncode(o.fs.root+o.remote)
+		bucket, bucketPath := o.split()
+		opts.Path += "/file/" + urlEncode(bucket) + "/" + urlEncode(bucketPath)
 	}
 	var resp *http.Response
 	err = o.fs.pacer.Call(func() (bool, error) {
@@ -1632,6 +1676,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
 	}
 	size := src.Size()
+	bucket, bucketPath := o.split()
 	if size == -1 {
 		// Check if the file is large enough for a chunked upload (needs to be at least two chunks)
 		buf := o.fs.getUploadBlock()
@@ -1677,7 +1722,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
 	}
 
 	// Get upload URL
-	upload, err := o.fs.getUploadURL()
+	upload, err := o.fs.getUploadURL(bucket)
 	if err != nil {
 		return err
 	}
@@ -1745,7 +1790,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
 		Body: in,
 		ExtraHeaders: map[string]string{
 			"Authorization":  upload.AuthorizationToken,
-			"X-Bz-File-Name": urlEncode(o.fs.root + o.remote),
+			"X-Bz-File-Name": urlEncode(bucketPath),
 			"Content-Type":   fs.MimeType(ctx, src),
 			sha1Header:       calculatedSha1,
 			timeHeader:       timeString(modTime),
@@ -1772,13 +1817,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
 
 // Remove an object
 func (o *Object) Remove(ctx context.Context) error {
+	bucket, bucketPath := o.split()
 	if o.fs.opt.Versions {
 		return errNotWithVersions
 	}
 	if o.fs.opt.HardDelete {
-		return o.fs.deleteByID(o.id, o.fs.root+o.remote)
+		return o.fs.deleteByID(o.id, bucketPath)
 	}
-	return o.fs.hide(o.fs.root + o.remote)
+	return o.fs.hide(bucket, bucketPath)
 }
 
 // MimeType of an Object if known, "" otherwise

View File

@@ -104,13 +104,14 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) {
 		Method: "POST",
 		Path:   "/b2_start_large_file",
 	}
-	bucketID, err := f.getBucketID()
+	bucket, bucketPath := o.split()
+	bucketID, err := f.getBucketID(bucket)
 	if err != nil {
 		return nil, err
 	}
 	var request = api.StartLargeFileRequest{
 		BucketID:    bucketID,
-		Name:        o.fs.root + remote,
+		Name:        bucketPath,
 		ContentType: fs.MimeType(ctx, src),
 		Info: map[string]string{
 			timeKey: timeString(modTime),