Exponential Backoff for ByteFIFO (#15724)

This PR is another in the vein of queue improvements. It introduces an exponential backoff for ByteFIFO queue polling to reduce the load caused by repeatedly polling empty queues. This will mostly be useful for Redis queues.
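
For context on the change below, here is a minimal, self-contained sketch of the backoff schedule this PR adopts (pollOnce is a hypothetical placeholder, not a Gitea function): the wait starts at 100ms, grows by half after every empty poll, is capped at 3 seconds, and resets as soon as a pop succeeds.

package main

import (
	"fmt"
	"time"
)

// pollOnce is a hypothetical stand-in for popping a task from the queue;
// the real code pops from the ByteFIFO under a lock (see doPop in the diff).
func pollOnce() bool { return false }

func main() {
	backOff := 100 * time.Millisecond
	maxBackOff := 3 * time.Second
	for i := 0; i < 6; i++ {
		if pollOnce() {
			// a successful pop resets the backoff so a busy queue is drained quickly
			backOff = 100 * time.Millisecond
			continue
		}
		fmt.Printf("empty poll, sleeping %v\n", backOff)
		time.Sleep(backOff)
		// grow the sleep by half after each empty poll, capped at 3 seconds
		backOff += backOff / 2
		if backOff > maxBackOff {
			backOff = maxBackOff
		}
	}
}

The reset on success keeps latency low while the queue is busy; the cap bounds how infrequently an idle queue is checked to roughly once every three seconds.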

Signed-off-by: Andrew Thornton <art27@cantab.net>

Co-authored-by: Lauris BH <lauris@nix.lv>
zeripath 2021-05-08 17:29:47 +01:00 committed by GitHub
parent 2a9b8d173a
commit e22ee468cf
1 changed file with 59 additions and 29 deletions

@@ -114,41 +114,71 @@ func (q *ByteFIFOQueue) Run(atShutdown, atTerminate func(context.Context, func()
 }
 
 func (q *ByteFIFOQueue) readToChan() {
-	for {
-		// handle quick cancels
-		select {
-		case <-q.closed:
-			// tell the pool to shutdown.
-			q.cancel()
-			return
-		default:
-			q.lock.Lock()
-			bs, err := q.byteFIFO.Pop()
-			if err != nil {
-				q.lock.Unlock()
-				log.Error("%s: %s Error on Pop: %v", q.typ, q.name, err)
-				time.Sleep(time.Millisecond * 100)
-				continue
-			}
-
-			if len(bs) == 0 {
-				q.lock.Unlock()
-				time.Sleep(time.Millisecond * 100)
-				continue
-			}
-
-			data, err := unmarshalAs(bs, q.exemplar)
-			if err != nil {
-				log.Error("%s: %s Failed to unmarshal with error: %v", q.typ, q.name, err)
-				q.lock.Unlock()
-				time.Sleep(time.Millisecond * 100)
-				continue
-			}
-
-			log.Trace("%s %s: Task found: %#v", q.typ, q.name, data)
-			q.WorkerPool.Push(data)
-			q.lock.Unlock()
-		}
-	}
-}
+	// handle quick cancels
+	select {
+	case <-q.closed:
+		// tell the pool to shutdown.
+		q.cancel()
+		return
+	default:
+	}
+
+	backOffTime := time.Millisecond * 100
+	maxBackOffTime := time.Second * 3
+	for {
+		success, resetBackoff := q.doPop()
+		if resetBackoff {
+			backOffTime = 100 * time.Millisecond
+		}
+
+		if success {
+			select {
+			case <-q.closed:
+				// tell the pool to shutdown.
+				q.cancel()
+				return
+			default:
+			}
+		} else {
+			select {
+			case <-q.closed:
+				// tell the pool to shutdown.
+				q.cancel()
+				return
+			case <-time.After(backOffTime):
+			}
+
+			backOffTime += backOffTime / 2
+			if backOffTime > maxBackOffTime {
+				backOffTime = maxBackOffTime
+			}
+		}
+	}
+}
+
+func (q *ByteFIFOQueue) doPop() (success, resetBackoff bool) {
+	q.lock.Lock()
+	defer q.lock.Unlock()
+	bs, err := q.byteFIFO.Pop()
+	if err != nil {
+		log.Error("%s: %s Error on Pop: %v", q.typ, q.name, err)
+		return
+	}
+	if len(bs) == 0 {
+		return
+	}
+
+	resetBackoff = true
+
+	data, err := unmarshalAs(bs, q.exemplar)
+	if err != nil {
+		log.Error("%s: %s Failed to unmarshal with error: %v", q.typ, q.name, err)
+		return
+	}
+
+	log.Trace("%s %s: Task found: %#v", q.typ, q.name, data)
+	q.WorkerPool.Push(data)
+	success = true
+	return
+}
 
 // Shutdown processing from this queue