bisync: support two --backup-dir paths on different remotes

Before this change, bisync supported `--backup-dir` only when `Path1` and
`Path2` were different paths on the same remote. With this change, bisync
introduces new `--backup-dir1` and `--backup-dir2` flags to support separate
backup-dirs for `Path1` and `Path2`.

`--backup-dir1` and `--backup-dir2` can use different remotes from each other,
but `--backup-dir1` must use the same remote as `Path1`, and `--backup-dir2`
must use the same remote as `Path2`. Each backup directory must not overlap its
respective bisync Path without being excluded by a filter rule.

The standard `--backup-dir` will also work, if both paths use the same remote
(but note that deleted files from both paths would be mixed together in the
same dir). If either `--backup-dir1` or `--backup-dir2` is set, it will
override `--backup-dir`.
This commit is contained in:
nielash 2023-11-12 10:34:38 -05:00
parent 9cf783677e
commit bbf9b1b3d2
6 changed files with 81 additions and 2 deletions

View File

@ -40,6 +40,9 @@ type Options struct {
Force bool Force bool
FiltersFile string FiltersFile string
Workdir string Workdir string
OrigBackupDir string
BackupDir1 string
BackupDir2 string
DryRun bool DryRun bool
NoCleanup bool NoCleanup bool
SaveQueues bool // save extra debugging files (test only flag) SaveQueues bool // save extra debugging files (test only flag)
@ -107,6 +110,8 @@ func init() {
Opt.Retries = 3 Opt.Retries = 3
cmd.Root.AddCommand(commandDefinition) cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags() cmdFlags := commandDefinition.Flags()
// when adding new flags, remember to also update the rc params:
// cmd/bisync/rc.go cmd/bisync/help.go (not docs/content/rc.md)
flags.BoolVarP(cmdFlags, &Opt.Resync, "resync", "1", Opt.Resync, "Performs the resync run. Path1 files may overwrite Path2 versions. Consider using --verbose or --dry-run first.", "") flags.BoolVarP(cmdFlags, &Opt.Resync, "resync", "1", Opt.Resync, "Performs the resync run. Path1 files may overwrite Path2 versions. Consider using --verbose or --dry-run first.", "")
flags.BoolVarP(cmdFlags, &Opt.CheckAccess, "check-access", "", Opt.CheckAccess, makeHelp("Ensure expected {CHECKFILE} files are found on both Path1 and Path2 filesystems, else abort."), "") flags.BoolVarP(cmdFlags, &Opt.CheckAccess, "check-access", "", Opt.CheckAccess, makeHelp("Ensure expected {CHECKFILE} files are found on both Path1 and Path2 filesystems, else abort."), "")
flags.StringVarP(cmdFlags, &Opt.CheckFilename, "check-filename", "", Opt.CheckFilename, makeHelp("Filename for --check-access (default: {CHECKFILE})"), "") flags.StringVarP(cmdFlags, &Opt.CheckFilename, "check-filename", "", Opt.CheckFilename, makeHelp("Filename for --check-access (default: {CHECKFILE})"), "")
@ -116,6 +121,8 @@ func init() {
flags.BoolVarP(cmdFlags, &Opt.RemoveEmptyDirs, "remove-empty-dirs", "", Opt.RemoveEmptyDirs, "Remove ALL empty directories at the final cleanup step.", "") flags.BoolVarP(cmdFlags, &Opt.RemoveEmptyDirs, "remove-empty-dirs", "", Opt.RemoveEmptyDirs, "Remove ALL empty directories at the final cleanup step.", "")
flags.StringVarP(cmdFlags, &Opt.FiltersFile, "filters-file", "", Opt.FiltersFile, "Read filtering patterns from a file", "") flags.StringVarP(cmdFlags, &Opt.FiltersFile, "filters-file", "", Opt.FiltersFile, "Read filtering patterns from a file", "")
flags.StringVarP(cmdFlags, &Opt.Workdir, "workdir", "", Opt.Workdir, makeHelp("Use custom working dir - useful for testing. (default: {WORKDIR})"), "") flags.StringVarP(cmdFlags, &Opt.Workdir, "workdir", "", Opt.Workdir, makeHelp("Use custom working dir - useful for testing. (default: {WORKDIR})"), "")
flags.StringVarP(cmdFlags, &Opt.BackupDir1, "backup-dir1", "", Opt.BackupDir1, "--backup-dir for Path1. Must be a non-overlapping path on the same remote.", "")
flags.StringVarP(cmdFlags, &Opt.BackupDir2, "backup-dir2", "", Opt.BackupDir2, "--backup-dir for Path2. Must be a non-overlapping path on the same remote.", "")
flags.BoolVarP(cmdFlags, &tzLocal, "localtime", "", tzLocal, "Use local time in listings (default: UTC)", "") flags.BoolVarP(cmdFlags, &tzLocal, "localtime", "", tzLocal, "Use local time in listings (default: UTC)", "")
flags.BoolVarP(cmdFlags, &Opt.NoCleanup, "no-cleanup", "", Opt.NoCleanup, "Retain working files (useful for troubleshooting and testing).", "") flags.BoolVarP(cmdFlags, &Opt.NoCleanup, "no-cleanup", "", Opt.NoCleanup, "Retain working files (useful for troubleshooting and testing).", "")
flags.BoolVarP(cmdFlags, &Opt.IgnoreListingChecksum, "ignore-listing-checksum", "", Opt.IgnoreListingChecksum, "Do not use checksums for listings (add --ignore-checksum to additionally skip post-copy checksum checks)", "") flags.BoolVarP(cmdFlags, &Opt.IgnoreListingChecksum, "ignore-listing-checksum", "", Opt.IgnoreListingChecksum, "Do not use checksums for listings (add --ignore-checksum to additionally skip post-copy checksum checks)", "")

View File

@ -346,6 +346,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
} else { } else {
fs.Debugf(nil, "Files are NOT equal: %s", file) fs.Debugf(nil, "Files are NOT equal: %s", file)
b.indent("!Path1", p1+"..path1", "Renaming Path1 copy") b.indent("!Path1", p1+"..path1", "Renaming Path1 copy")
ctxMove = b.setBackupDir(ctxMove, 1) // in case already a file with new name
if err = operations.MoveFile(ctxMove, b.fs1, b.fs1, file+"..path1", file); err != nil { if err = operations.MoveFile(ctxMove, b.fs1, b.fs1, file+"..path1", file); err != nil {
err = fmt.Errorf("path1 rename failed for %s: %w", p1, err) err = fmt.Errorf("path1 rename failed for %s: %w", p1, err)
b.critical = true b.critical = true
@ -360,6 +361,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
copy1to2.Add(file + "..path1") copy1to2.Add(file + "..path1")
b.indent("!Path2", p2+"..path2", "Renaming Path2 copy") b.indent("!Path2", p2+"..path2", "Renaming Path2 copy")
ctxMove = b.setBackupDir(ctxMove, 2) // in case already a file with new name
if err = operations.MoveFile(ctxMove, b.fs2, b.fs2, alias+"..path2", alias); err != nil { if err = operations.MoveFile(ctxMove, b.fs2, b.fs2, alias+"..path2", alias); err != nil {
err = fmt.Errorf("path2 rename failed for %s: %w", alias, err) err = fmt.Errorf("path2 rename failed for %s: %w", alias, err)
return return
@ -426,6 +428,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
if copy2to1.NotEmpty() { if copy2to1.NotEmpty() {
changes1 = true changes1 = true
b.indent("Path2", "Path1", "Do queued copies to") b.indent("Path2", "Path1", "Do queued copies to")
ctx = b.setBackupDir(ctx, 1)
results2to1, err = b.fastCopy(ctx, b.fs2, b.fs1, copy2to1, "copy2to1") results2to1, err = b.fastCopy(ctx, b.fs2, b.fs1, copy2to1, "copy2to1")
// retries, if any // retries, if any
@ -442,6 +445,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
if copy1to2.NotEmpty() { if copy1to2.NotEmpty() {
changes2 = true changes2 = true
b.indent("Path1", "Path2", "Do queued copies to") b.indent("Path1", "Path2", "Do queued copies to")
ctx = b.setBackupDir(ctx, 2)
results1to2, err = b.fastCopy(ctx, b.fs1, b.fs2, copy1to2, "copy1to2") results1to2, err = b.fastCopy(ctx, b.fs1, b.fs2, copy1to2, "copy1to2")
// retries, if any // retries, if any

View File

@ -10,7 +10,7 @@ func makeHelp(help string) string {
"|", "`", "|", "`",
"{MAXDELETE}", strconv.Itoa(DefaultMaxDelete), "{MAXDELETE}", strconv.Itoa(DefaultMaxDelete),
"{CHECKFILE}", DefaultCheckFilename, "{CHECKFILE}", DefaultCheckFilename,
"{WORKDIR}", DefaultWorkdir, // "{WORKDIR}", DefaultWorkdir,
) )
return replacer.Replace(help) return replacer.Replace(help)
} }
@ -37,7 +37,9 @@ var rcHelp = makeHelp(`This takes the following parameters
- ignoreListingChecksum - Do not use checksums for listings - ignoreListingChecksum - Do not use checksums for listings
- resilient - Allow future runs to retry after certain less-serious errors, instead of requiring resync. - resilient - Allow future runs to retry after certain less-serious errors, instead of requiring resync.
Use at your own risk! Use at your own risk!
- workdir - server directory for history files (default: {WORKDIR}) - workdir - server directory for history files (default: |~/.cache/rclone/bisync|)
- backupdir1 - --backup-dir for Path1. Must be a non-overlapping path on the same remote.
- backupdir2 - --backup-dir for Path2. Must be a non-overlapping path on the same remote.
- noCleanup - retain working files - noCleanup - retain working files
See [bisync command help](https://rclone.org/commands/rclone_bisync/) See [bisync command help](https://rclone.org/commands/rclone_bisync/)

View File

@ -68,6 +68,8 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
if opt.Workdir == "" { if opt.Workdir == "" {
opt.Workdir = DefaultWorkdir opt.Workdir = DefaultWorkdir
} }
ci := fs.GetConfig(ctx)
opt.OrigBackupDir = ci.BackupDir
if !opt.DryRun && !opt.Force { if !opt.DryRun && !opt.Force {
if fs1.Precision() == fs.ModTimeNotSupported { if fs1.Precision() == fs.ModTimeNotSupported {
@ -358,7 +360,9 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
// Optional rmdirs for empty directories // Optional rmdirs for empty directories
if opt.RemoveEmptyDirs { if opt.RemoveEmptyDirs {
fs.Infof(nil, "Removing empty directories") fs.Infof(nil, "Removing empty directories")
fctx = b.setBackupDir(fctx, 1)
err1 := operations.Rmdirs(fctx, b.fs1, "", true) err1 := operations.Rmdirs(fctx, b.fs1, "", true)
fctx = b.setBackupDir(fctx, 2)
err2 := operations.Rmdirs(fctx, b.fs2, "", true) err2 := operations.Rmdirs(fctx, b.fs2, "", true)
err := err1 err := err1
if err == nil { if err == nil {
@ -445,6 +449,8 @@ func (b *bisyncRun) resync(octx, fctx context.Context) error {
} }
ci := fs.GetConfig(ctxSync) ci := fs.GetConfig(ctxSync)
ci.IgnoreExisting = true ci.IgnoreExisting = true
ctxSync = b.setBackupDir(ctxSync, 1)
// 2 to 1
if results2to1, err = b.resyncDir(ctxSync, b.fs2, b.fs1); err != nil { if results2to1, err = b.resyncDir(ctxSync, b.fs2, b.fs1); err != nil {
b.critical = true b.critical = true
return err return err
@ -452,6 +458,8 @@ func (b *bisyncRun) resync(octx, fctx context.Context) error {
b.indent("Path1", "Path2", "Resync is copying UNIQUE OR DIFFERING files to") b.indent("Path1", "Path2", "Resync is copying UNIQUE OR DIFFERING files to")
ci.IgnoreExisting = false ci.IgnoreExisting = false
ctxSync = b.setBackupDir(ctxSync, 2)
// 1 to 2
if results1to2, err = b.resyncDir(ctxSync, b.fs1, b.fs2); err != nil { if results1to2, err = b.resyncDir(ctxSync, b.fs1, b.fs2); err != nil {
b.critical = true b.critical = true
return err return err
@ -581,3 +589,17 @@ func (b *bisyncRun) handleErr(o interface{}, msg string, err error, critical, re
} }
} }
} }
// setBackupDir overrides --backup-dir with the path-specific version
// (--backup-dir1 or --backup-dir2), if set, for the given direction.
// destPath indicates which side is the destination of the operation:
// 1 for Path1, 2 for Path2. If no per-path flag applies, the user's
// original --backup-dir (saved in OrigBackupDir) is restored, since the
// config in ctx is mutated in place and may carry a value set by a
// previous call for the other direction.
func (b *bisyncRun) setBackupDir(ctx context.Context, destPath int) context.Context {
	ci := fs.GetConfig(ctx)
	// Reset first so a backup-dir chosen for the other path does not leak
	// into this direction.
	ci.BackupDir = b.opt.OrigBackupDir
	if destPath == 1 && b.opt.BackupDir1 != "" {
		ci.BackupDir = b.opt.BackupDir1
	}
	if destPath == 2 && b.opt.BackupDir2 != "" {
		// was BackupDir1 — copy-paste bug that routed Path2 backups to
		// Path1's backup-dir (potentially on a different remote)
		ci.BackupDir = b.opt.BackupDir2
	}
	fs.Debugf(ci.BackupDir, "updated backup-dir for Path%d", destPath)
	return ctx
}

View File

@ -74,6 +74,12 @@ func rcBisync(ctx context.Context, in rc.Params) (out rc.Params, err error) {
if opt.Workdir, err = in.GetString("workdir"); rc.NotErrParamNotFound(err) { if opt.Workdir, err = in.GetString("workdir"); rc.NotErrParamNotFound(err) {
return return
} }
if opt.BackupDir1, err = in.GetString("backupdir1"); rc.NotErrParamNotFound(err) {
return
}
if opt.BackupDir2, err = in.GetString("backupdir2"); rc.NotErrParamNotFound(err) {
return
}
checkSync, err := in.GetString("checkSync") checkSync, err := in.GetString("checkSync")
if rc.NotErrParamNotFound(err) { if rc.NotErrParamNotFound(err) {

View File

@ -105,6 +105,8 @@ Optional Flags:
--no-cleanup Retain working files (useful for troubleshooting and testing). --no-cleanup Retain working files (useful for troubleshooting and testing).
--workdir PATH Use custom working directory (useful for testing). --workdir PATH Use custom working directory (useful for testing).
(default: `~/.cache/rclone/bisync`) (default: `~/.cache/rclone/bisync`)
--backup-dir1 PATH --backup-dir for Path1. Must be a non-overlapping path on the same remote.
--backup-dir2 PATH --backup-dir for Path2. Must be a non-overlapping path on the same remote.
-n, --dry-run Go through the motions - No files are copied/deleted. -n, --dry-run Go through the motions - No files are copied/deleted.
-v, --verbose Increases logging verbosity. -v, --verbose Increases logging verbosity.
May be specified more than once for more details. May be specified more than once for more details.
@ -357,6 +359,42 @@ Certain more serious errors will still enforce a `--resync` lockout, even in `--
Behavior of `--resilient` may change in a future version. Behavior of `--resilient` may change in a future version.
#### --backup-dir1 and --backup-dir2
As of `v1.65`, [`--backup-dir`](/docs/#backup-dir-dir) is supported in bisync.
Because `--backup-dir` must be a non-overlapping path on the same remote,
Bisync has introduced new `--backup-dir1` and `--backup-dir2` flags to support
separate backup-dirs for `Path1` and `Path2` (bisyncing between different
remotes with `--backup-dir` would not otherwise be possible.) `--backup-dir1`
and `--backup-dir2` can use different remotes from each other, but
`--backup-dir1` must use the same remote as `Path1`, and `--backup-dir2` must
use the same remote as `Path2`. Each backup directory must not overlap its
respective bisync Path without being excluded by a filter rule.
The standard `--backup-dir` will also work, if both paths use the same remote
(but note that deleted files from both paths would be mixed together in the
same dir). If either `--backup-dir1` or `--backup-dir2` is set, it will
override `--backup-dir`.
Example:
```
rclone bisync /Users/someuser/some/local/path/Bisync gdrive:Bisync --backup-dir1 /Users/someuser/some/local/path/BackupDir --backup-dir2 gdrive:BackupDir --suffix -2023-08-26 --suffix-keep-extension --check-access --max-delete 10 --filters-file /Users/someuser/some/local/path/bisync_filters.txt --no-cleanup --ignore-listing-checksum --checkers=16 --drive-pacer-min-sleep=10ms --create-empty-src-dirs --resilient -MvP --drive-skip-gdocs --fix-case
```
In this example, if the user deletes a file in
`/Users/someuser/some/local/path/Bisync`, bisync will propagate the delete to
the other side by moving the corresponding file from `gdrive:Bisync` to
`gdrive:BackupDir`. If the user deletes a file from `gdrive:Bisync`, bisync
moves it from `/Users/someuser/some/local/path/Bisync` to
`/Users/someuser/some/local/path/BackupDir`.
In the event of a `..path1` / `..path2` rename due to a sync conflict, the
rename is not considered a delete, unless a previous conflict with the same
name already exists and would get overwritten.
See also: [`--suffix`](/docs/#suffix-suffix),
[`--suffix-keep-extension`](/docs/#suffix-keep-extension)
## Operation ## Operation
### Runtime flow details ### Runtime flow details