s3: make reading metadata more reliable to work around eventual consistency problems

Nick Craig-Wood 2014-07-28 22:32:15 +01:00
parent 2f9f9afac2
commit 0b51d6221a
1 changed file with 16 additions and 1 deletion
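The change retries the S3 HEAD request with exponential backoff when it returns 404, because an object that was just uploaded may not be visible to a read yet under S3's eventual consistency. As a minimal, self-contained sketch of that retry pattern (not the rclone code itself: fetchMetadata and its simulated 404 are hypothetical stand-ins for the s3.Bucket Head call):

package main

import (
	"errors"
	"fmt"
	"net/http"
	"time"
)

// errNotFound stands in for the 404 error the S3 client would return.
var errNotFound = fmt.Errorf("status %d: object not found", http.StatusNotFound)

// fetchMetadata is a hypothetical stand-in for the HEAD request to S3.
// It returns 404 for the first two calls to simulate eventual consistency.
func fetchMetadata(calls *int) (map[string]string, error) {
	*calls++
	if *calls < 3 {
		return nil, errNotFound
	}
	return map[string]string{"Content-Length": "42"}, nil
}

func main() {
	var (
		meta  map[string]string
		err   error
		calls int
	)

	// Retry with exponential backoff: 5ms, 10ms, 20ms, ... on "not found".
	// Any other error, or success, breaks out of the loop immediately.
	for tries := uint(0); tries < 10; tries++ {
		meta, err = fetchMetadata(&calls)
		if errors.Is(err, errNotFound) {
			time.Sleep(5 * time.Millisecond << tries)
			continue
		}
		break
	}
	if err != nil {
		fmt.Println("failed to read metadata:", err)
		return
	}
	fmt.Println("metadata after", calls, "attempts:", meta)
}

With a 5 ms base delay doubling on each attempt, ten consecutive 404s sleep 5 + 10 + ... + 2560 ms, roughly 5.1 s in total, before the error is finally returned to the caller.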

@@ -421,13 +421,28 @@ func (o *FsObjectS3) Size() int64 {
 // readMetaData gets the metadata if it hasn't already been fetched
 //
+// if we get a 404 error then we retry a few times for eventual
+// consistency reasons
+//
 // it also sets the info
 func (o *FsObjectS3) readMetaData() (err error) {
 	if o.meta != nil {
 		return nil
 	}
-	headers, err := o.s3.b.Head(o.s3.root+o.remote, nil)
+	var headers s3.Headers
+	// Try reading the metadata a few times (with exponential
+	// backoff) to get around eventual consistency on 404 error
+	for tries := uint(0); tries < 10; tries++ {
+		headers, err = o.s3.b.Head(o.s3.root+o.remote, nil)
+		if s3Err, ok := err.(*s3.Error); ok {
+			if s3Err.StatusCode == http.StatusNotFound {
+				time.Sleep(5 * time.Millisecond << tries)
+				continue
+			}
+		}
+		break
+	}
 	if err != nil {
 		fs.Debug(o, "Failed to read info: %s", err)
 		return err