fs: Add --max-delete-size, a delete size threshold

Fixes #3329
Leandro Sacchet 2022-08-03 11:53:02 -03:00 committed by Nick Craig-Wood
parent fb4600f6f9
commit f689db4422
6 changed files with 83 additions and 0 deletions


@ -1334,6 +1334,14 @@ This tells rclone not to delete more than N files. If that limit is
exceeded then a fatal error will be generated and rclone will stop the
operation in progress.

### --max-delete-size=SIZE ###

Rclone will stop deleting files when the total size of deletions has
reached the size specified. It defaults to off.

If that limit is exceeded then a fatal error will be generated and
rclone will stop the operation in progress.
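
For example, the following illustrative command (with `source:` and `dest:`
standing in for real remotes) aborts a sync once deleting the next file would
push the total deleted size past 1 GiB:

    rclone sync source:path dest:path --max-delete-size 1G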

### --max-depth=N ###

This modifies the recursion depth for all the commands except purge.


@ -48,6 +48,7 @@ type StatsInfo struct {
	renameQueue      int
	renameQueueSize  int64
	deletes          int64
	deletesSize      int64
	deletedDirs      int64
	inProgress       *inProgress
	startedTransfers []*Transfer // currently active transfers
@ -598,6 +599,14 @@ func (s *StatsInfo) Deletes(deletes int64) int64 {
	return s.deletes
}

// DeletesSize updates the stats for deletes size
func (s *StatsInfo) DeletesSize(deletesSize int64) int64 {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.deletesSize += deletesSize
	return s.deletesSize
}

// DeletedDirs updates the stats for deletedDirs
func (s *StatsInfo) DeletedDirs(deletedDirs int64) int64 {
	s.mu.Lock()

@ -627,6 +636,7 @@ func (s *StatsInfo) ResetCounters() {
	s.checks = 0
	s.transfers = 0
	s.deletes = 0
	s.deletesSize = 0
	s.deletedDirs = 0
	s.renames = 0
	s.startedTransfers = nil
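
Like the other counters on `StatsInfo`, `DeletesSize` adds under the mutex and returns the new running total, which is why passing 0 acts as a plain read while passing a byte count acts as an add (the delete path below relies on both). A minimal standalone sketch of that accumulator pattern (`counter` is an illustrative type, not rclone's):

```go
package main

import (
	"fmt"
	"sync"
)

// counter mirrors the StatsInfo accumulator style: add the delta under a
// mutex and return the new total, so a zero delta works as a plain read.
type counter struct {
	mu    sync.Mutex
	total int64
}

// Add adds delta to the running total and returns the result.
func (c *counter) Add(delta int64) int64 {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.total += delta
	return c.total
}

func main() {
	var deletesSize counter
	deletesSize.Add(1024)           // record a 1 KiB deletion
	fmt.Println(deletesSize.Add(0)) // read the running total: prints 1024
}
```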


@ -71,6 +71,7 @@ type ConfigInfo struct {
	InsecureSkipVerify   bool // Skip server certificate verification
	DeleteMode           DeleteMode
	MaxDelete            int64
	MaxDeleteSize        SizeSuffix
	TrackRenames         bool   // Track file renames.
	TrackRenamesStrategy string // Comma separated list of strategies used to track renames
	LowLevelRetries      int

@ -162,6 +163,7 @@ func NewConfig() *ConfigInfo {
	c.ExpectContinueTimeout = 1 * time.Second
	c.DeleteMode = DeleteModeDefault
	c.MaxDelete = -1
	c.MaxDeleteSize = SizeSuffix(-1)
	c.LowLevelRetries = 10
	c.MaxDepth = -1
	c.DataRateUnit = "bytes"


@ -71,6 +71,7 @@ func AddFlags(ci *fs.ConfigInfo, flagSet *pflag.FlagSet) {
	flags.BoolVarP(flagSet, &deleteDuring, "delete-during", "", false, "When synchronizing, delete files during transfer")
	flags.BoolVarP(flagSet, &deleteAfter, "delete-after", "", false, "When synchronizing, delete files on destination after transferring (default)")
	flags.Int64VarP(flagSet, &ci.MaxDelete, "max-delete", "", -1, "When synchronizing, limit the number of deletes")
	flags.FVarP(flagSet, &ci.MaxDeleteSize, "max-delete-size", "", "When synchronizing, limit the total size of deletes")
	flags.BoolVarP(flagSet, &ci.TrackRenames, "track-renames", "", ci.TrackRenames, "When synchronizing, track file renames and do a server-side move if possible")
	flags.StringVarP(flagSet, &ci.TrackRenamesStrategy, "track-renames-strategy", "", ci.TrackRenamesStrategy, "Strategies to use when synchronizing using track-renames hash|modtime|leaf")
	flags.IntVarP(flagSet, &ci.LowLevelRetries, "low-level-retries", "", ci.LowLevelRetries, "Number of low level retries to do")
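
Unlike `--max-delete`, which is a plain int64 flag, `--max-delete-size` is bound with `flags.FVarP`, which wires a value through pflag's `Value`-style interface so `SizeSuffix` can parse human-readable sizes such as `100M` or `1G`. The sketch below shows that binding mechanism with a toy size type; `sizeFlag` and its suffix handling are illustrative only, not rclone's `SizeSuffix` implementation:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/spf13/pflag"
)

// sizeFlag is a toy stand-in for a SizeSuffix-style value: it satisfies
// pflag.Value (String/Set/Type), so it can be registered with VarP.
type sizeFlag int64

func (s *sizeFlag) String() string { return strconv.FormatInt(int64(*s), 10) }
func (s *sizeFlag) Type() string   { return "SizeSuffix" }

// Set parses an optional binary suffix (only M and G here, for brevity).
func (s *sizeFlag) Set(v string) error {
	mult := int64(1)
	switch {
	case strings.HasSuffix(v, "G"):
		mult, v = 1<<30, strings.TrimSuffix(v, "G")
	case strings.HasSuffix(v, "M"):
		mult, v = 1<<20, strings.TrimSuffix(v, "M")
	}
	n, err := strconv.ParseInt(v, 10, 64)
	if err != nil {
		return err
	}
	*s = sizeFlag(n * mult)
	return nil
}

func main() {
	flagSet := pflag.NewFlagSet("example", pflag.ExitOnError)
	maxDeleteSize := sizeFlag(-1) // -1 means "off", as in NewConfig
	flagSet.VarP(&maxDeleteSize, "max-delete-size", "", "limit the total size of deletes")
	_ = flagSet.Parse([]string{"--max-delete-size", "100M"})
	fmt.Println(int64(maxDeleteSize)) // 104857600
}
```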


@ -637,6 +637,12 @@ func DeleteFileWithBackupDir(ctx context.Context, dst fs.Object, backupDir fs.Fs
	defer func() {
		tr.Done(ctx, err)
	}()
	deletesSize := accounting.Stats(ctx).DeletesSize(0) // read the running total without adding; this file is not deleted yet
	size := dst.Size()
	if int64(ci.MaxDeleteSize) != -1 && (deletesSize+size) > int64(ci.MaxDeleteSize) {
		return fserrors.FatalError(errors.New("--max-delete-size threshold reached"))
	}
	_ = accounting.Stats(ctx).DeletesSize(size) // the check passed, so record this file's size
	numDeletes := accounting.Stats(ctx).Deletes(1)
	if ci.MaxDelete != -1 && numDeletes > ci.MaxDelete {
		return fserrors.FatalError(errors.New("--max-delete threshold reached"))
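
The new check calls `DeletesSize(0)` to read the bytes already deleted without adding anything, refuses the delete with a fatal error if that total plus the file's size would exceed `--max-delete-size` (where -1 means the limit is off), and only then records the file's size. A standalone sketch of the same decision logic, walking the byte counts used in the tests below (`exceedsMaxDeleteSize` is an illustrative helper, not rclone code):

```go
package main

import (
	"errors"
	"fmt"
)

// exceedsMaxDeleteSize mirrors the check in DeleteFileWithBackupDir:
// a negative limit means "off"; otherwise a delete is refused once the
// accumulated deleted bytes plus this file's size would pass the limit.
func exceedsMaxDeleteSize(deletedSoFar, fileSize, maxDeleteSize int64) bool {
	return maxDeleteSize != -1 && deletedSoFar+fileSize > maxDeleteSize
}

func main() {
	limit := int64(160)
	var deleted int64
	for _, size := range []int64{10, 60, 100} {
		if exceedsMaxDeleteSize(deleted, size, limit) {
			fmt.Println(errors.New("--max-delete-size threshold reached"))
			break
		}
		deleted += size // only counted once the check passes
		fmt.Println("deleted", size, "bytes, total", deleted)
	}
}
```

The three files total 170 bytes, and any two of them fit under a 160-byte limit while all three do not, so at most two can be deleted; that is consistent with TestMaxDeleteSize expecting exactly one object to remain.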


@ -419,6 +419,62 @@ func TestDelete(t *testing.T) {
	r.CheckRemoteItems(t, file3)
}

func TestMaxDelete(t *testing.T) {
	ctx := context.Background()
	ctx, ci := fs.AddConfig(ctx)
	r := fstest.NewRun(t)
	accounting.GlobalStats().ResetCounters()
	ci.MaxDelete = 2
	defer r.Finalise()
	file1 := r.WriteObject(ctx, "small", "1234567890", t2) // 10 bytes
	file2 := r.WriteObject(ctx, "medium", "------------------------------------------------------------", t1) // 60 bytes
	file3 := r.WriteObject(ctx, "large", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", t1) // 100 bytes
	r.CheckRemoteItems(t, file1, file2, file3)
	err := operations.Delete(ctx, r.Fremote)
	require.Error(t, err)
	objects, _, _, err := operations.Count(ctx, r.Fremote)
	require.NoError(t, err)
	assert.Equal(t, int64(1), objects)
}

// TestMaxDeleteSizeLargeFile checks the case where one of the files is larger
// than the allowed total size
func TestMaxDeleteSizeLargeFile(t *testing.T) {
	ctx := context.Background()
	ctx, ci := fs.AddConfig(ctx)
	r := fstest.NewRun(t)
	accounting.GlobalStats().ResetCounters()
	ci.MaxDeleteSize = 70
	defer r.Finalise()
	file1 := r.WriteObject(ctx, "small", "1234567890", t2) // 10 bytes
	file2 := r.WriteObject(ctx, "medium", "------------------------------------------------------------", t1) // 60 bytes
	file3 := r.WriteObject(ctx, "large", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", t1) // 100 bytes
	r.CheckRemoteItems(t, file1, file2, file3)
	err := operations.Delete(ctx, r.Fremote)
	require.Error(t, err)
	r.CheckRemoteItems(t, file3)
}

func TestMaxDeleteSize(t *testing.T) {
	ctx := context.Background()
	ctx, ci := fs.AddConfig(ctx)
	r := fstest.NewRun(t)
	accounting.GlobalStats().ResetCounters()
	ci.MaxDeleteSize = 160
	defer r.Finalise()
	file1 := r.WriteObject(ctx, "small", "1234567890", t2) // 10 bytes
	file2 := r.WriteObject(ctx, "medium", "------------------------------------------------------------", t1) // 60 bytes
	file3 := r.WriteObject(ctx, "large", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", t1) // 100 bytes
	r.CheckRemoteItems(t, file1, file2, file3)
	err := operations.Delete(ctx, r.Fremote)
	require.Error(t, err)
	objects, _, _, err := operations.Count(ctx, r.Fremote)
	require.NoError(t, err)
	assert.Equal(t, int64(1), objects) // 10 or 100 bytes
}

func TestRetry(t *testing.T) {
	ctx := context.Background()