azureblob: Added blob tier feature

A new configuration option, azureblob-access-tier, is added to tier blobs between Hot, Cool and Archive. Addresses #2901
sandeepkru 2018-08-19 09:53:59 -07:00 committed by Nick Craig-Wood
parent 9f671c5dd0
commit 3751ceebdd
2 changed files with 62 additions and 1 deletion
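For illustration, a remote using the new option might look like this in rclone.conf (the remote name, account and key are placeholders):

	[azblob]
	type = azureblob
	account = myaccount
	key = XXXX
	access_tier = Hot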


@@ -48,6 +48,7 @@ const (
maxChunkSize = 100 * 1024 * 1024
defaultUploadCutoff = 256 * 1024 * 1024
maxUploadCutoff = 256 * 1024 * 1024
defaultAccessTier = azblob.AccessTierNone
)
// Register with Fs
@@ -79,6 +80,11 @@ func init() {
Help: "Upload chunk size. Must fit in memory.",
Default: fs.SizeSuffix(defaultChunkSize),
Advanced: true,
}, {
Name: "access_tier",
Help: "Access tier of blob, supports hot, cool and archive tiers.\nArchived blobs can be restored by setting access tier to hot or cool." +
" Leave blank if you intend to use default access tier, which is set at account level",
Advanced: true,
}},
})
}
@@ -91,6 +97,7 @@ type Options struct {
SASURL string `config:"sas_url"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
AccessTier string `config:"access_tier"`
}
// Fs represents a remote azure server
@@ -212,6 +219,19 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
opt.Endpoint = storageDefaultBaseURL
}
if opt.AccessTier == "" {
opt.AccessTier = string(defaultAccessTier)
} else {
switch opt.AccessTier {
case string(azblob.AccessTierHot), string(azblob.AccessTierCool), string(azblob.AccessTierArchive):
// valid tiers
default:
return nil, errors.Errorf("azure: supported access tiers are %s, %s and %s", string(azblob.AccessTierHot), string(azblob.AccessTierCool), string(azblob.AccessTierArchive))
}
}
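The validation above could equally be written as a small standalone helper (a sketch only, not part of this commit; `validateAccessTier` is a hypothetical name, while `defaultAccessTier` and the `errors` package are as used in this file):

	func validateAccessTier(tier string) (azblob.AccessTierType, error) {
		if tier == "" {
			// No tier configured: fall back to the account-level default.
			return defaultAccessTier, nil
		}
		switch t := azblob.AccessTierType(tier); t {
		case azblob.AccessTierHot, azblob.AccessTierCool, azblob.AccessTierArchive:
			return t, nil
		default:
			return "", errors.Errorf("azure: supported access tiers are %s, %s and %s", azblob.AccessTierHot, azblob.AccessTierCool, azblob.AccessTierArchive)
		}
	}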
var (
u *url.URL
serviceURL azblob.ServiceURL
@@ -946,6 +966,10 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
// Offset and Count for range download
var offset int64
var count int64
if o.AccessTier() == azblob.AccessTierArchive {
return nil, errors.New("blob is in archive tier, set tier to hot or cool first")
}
for _, option := range options {
switch x := option.(type) {
case *fs.RangeOption:
@@ -1199,8 +1223,29 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
if err != nil {
return err
}
// Refresh metadata on object
o.clearMetaData()
- return o.readMetaData()
+ err = o.readMetaData()
if err != nil {
return err
}
// If the tier is unchanged or not specified, do not attempt a `SetTier` operation
if o.fs.opt.AccessTier == string(defaultAccessTier) || o.fs.opt.AccessTier == string(o.AccessTier()) {
return nil
}
// Now, set blob tier based on configured access tier
desiredAccessTier := azblob.AccessTierType(o.fs.opt.AccessTier)
err = o.fs.pacer.Call(func() (bool, error) {
_, err := blob.SetTier(ctx, desiredAccessTier)
return o.fs.shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "Failed to set Blob Tier")
}
return nil
}
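For context, the `blob.SetTier` call above comes from the azblob SDK. A minimal standalone sketch of the same operation (the account URL and credential are placeholders, and the exact import path depends on which SDK version is vendored):

	package main

	import (
		"context"
		"log"
		"net/url"

		"github.com/Azure/azure-storage-blob-go/2018-03-28/azblob"
	)

	func main() {
		// Anonymous credential as a placeholder; real code needs a shared
		// key or SAS credential to authorize the Set Blob Tier request.
		p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{})
		u, _ := url.Parse("https://myaccount.blob.core.windows.net/container/blob.txt")
		blobURL := azblob.NewBlobURL(*u, p)

		// Move the blob to Cool; moving it to Archive would make it
		// unreadable until rehydrated back to Hot or Cool.
		if _, err := blobURL.SetTier(context.Background(), azblob.AccessTierCool); err != nil {
			log.Fatal(err)
		}
	}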
// Remove an object
@@ -1220,6 +1265,11 @@ func (o *Object) MimeType() string {
return o.mimeType
}
// AccessTier of an object, default is of type none
func (o *Object) AccessTier() azblob.AccessTierType {
return o.accessTier
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}


@@ -184,6 +184,17 @@ Upload chunk size. Default 4MB. Note that this is stored in memory
and there may be up to `--transfers` chunks stored at once in memory.
This can be at most 100MB.
#### --azureblob-access-tier=Hot/Cool/Archive ####
Azure storage supports blob tiering. You can configure the tier in the
advanced settings or supply the flag while performing data transfer
operations. If no access tier is specified, rclone doesn't apply any tier.
rclone performs a `Set Tier` operation on blobs while uploading; if the
objects are not modified, changing the `access tier` to a new value will
have no effect. If blobs are in `archive tier` at the remote, data transfer
operations from the remote will not be allowed; the user should first
restore them by tiering the blobs to `Hot` or `Cool`.
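For example, uploading data straight into the Archive tier could look like this (the remote name `remote` and container `container` are placeholders):

    rclone copy --azureblob-access-tier=Archive /path/to/files remote:container

As noted above, the tier is only applied to blobs that are actually uploaded; re-running the command with a different tier over unmodified files will not retier them.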
### Limitations ###
MD5 sums are only uploaded with chunked files if the source has an MD5