Compare commits

50 commits

master ... branch-v1.
Author | SHA1
---|---
Nick Craig-Wood | ffcaa6cd92
Nick Craig-Wood | 98e81a6c2b
Nick Craig-Wood | 9e60a065b4
nielash | 95e18bdc6f
dependabot[bot] | a4c7b3da15
Tera | be10debded
kapitainsky | a7cb8b71f0
Harshit Budhraja | 322d683997
Harshit Budhraja | 964753b2d5
Nick Craig-Wood | 8b3bd74565
Nick Craig-Wood | c394786c95
Nick Craig-Wood | 824d01065a
Nick Craig-Wood | 07bf3a4ccc
dependabot[bot] | 19274ed78d
Nick Craig-Wood | 6276c7123a
Nick Craig-Wood | 863b4125c3
Vincent Murphy | 576ecf559d
Nick Craig-Wood | cfd581a986
Nick Craig-Wood | ad8bde69b3
Nick Craig-Wood | 771ec943f2
Nick Craig-Wood | 4a297b35e5
Nick Craig-Wood | 6b61967507
Nick Craig-Wood | e174c8f822
Nick Craig-Wood | bff56d0b24
Nick Craig-Wood | 59ff59e45a
dependabot[bot] | c27ab0211c
rkonfj | 9979b9d082
WeidiDeng | 2be627aa56
Nick Craig-Wood | 3f7abd278d
nielash | 489c36b101
albertony | df65aced2e
rarspace01 | 141e97edb8
Oksana | 8571eaf425
Manoj Ghosh | 6ccbebd903
Nick Craig-Wood | 8b8156f7c3
keongalvin | a0b19fefdf
Nick Craig-Wood | d0e68480be
Nick Craig-Wood | ab6c5252f1
emyarod | 29a23c5e18
dependabot[bot] | caacf55b69
Eli Orzitzer | f62ae71b4c
Nick Craig-Wood | 4245a042c0
Nick Craig-Wood | 3f3245fcd4
Nick Craig-Wood | 5742a61d23
ben-ba | 768c57c1ba
Manoj Ghosh | 9f42ed3380
halms | 40a7edab2d
Nick Craig-Wood | 5a22dad9a7
Nick Craig-Wood | b3c2985544
Nick Craig-Wood | 938753ddc3
@@ -27,12 +27,12 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.20', 'go1.21']
+        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.19', 'go1.20']

         include:
           - job_name: linux
             os: ubuntu-latest
-            go: '>=1.22.0-rc.1'
+            go: '1.21'
             gotags: cmount
             build_flags: '-include "^linux/"'
             check: true
@@ -43,14 +43,14 @@ jobs:

           - job_name: linux_386
             os: ubuntu-latest
-            go: '>=1.22.0-rc.1'
+            go: '1.21'
             goarch: 386
             gotags: cmount
             quicktest: true

           - job_name: mac_amd64
             os: macos-11
-            go: '>=1.22.0-rc.1'
+            go: '1.21'
             gotags: 'cmount'
             build_flags: '-include "^darwin/amd64" -cgo'
             quicktest: true
@@ -59,14 +59,14 @@ jobs:

           - job_name: mac_arm64
             os: macos-11
-            go: '>=1.22.0-rc.1'
+            go: '1.21'
             gotags: 'cmount'
             build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
             deploy: true

           - job_name: windows
             os: windows-latest
-            go: '>=1.22.0-rc.1'
+            go: '1.21'
             gotags: cmount
             cgo: '0'
             build_flags: '-include "^windows/"'
@@ -76,20 +76,20 @@ jobs:

           - job_name: other_os
             os: ubuntu-latest
-            go: '>=1.22.0-rc.1'
+            go: '1.21'
             build_flags: '-exclude "^(windows/|darwin/|linux/)"'
             compile_all: true
             deploy: true

-          - job_name: go1.20
+          - job_name: go1.19
             os: ubuntu-latest
-            go: '1.20'
+            go: '1.19'
             quicktest: true
             racequicktest: true

-          - job_name: go1.21
+          - job_name: go1.20
             os: ubuntu-latest
-            go: '1.21'
+            go: '1.20'
             quicktest: true
             racequicktest: true

@@ -243,7 +243,7 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v5
         with:
-          go-version: '>=1.22.0-rc.1'
+          go-version: '1.21'
           check-latest: true

       - name: Install govulncheck
@@ -268,7 +268,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v5
         with:
-          go-version: '>=1.22.0-rc.1'
+          go-version: '1.21'

       - name: Go module cache
         uses: actions/cache@v4
File diff suppressed because it is too large
File diff suppressed because it is too large
Makefile (2 changed lines)
@@ -103,7 +103,7 @@ check: rclone

 # Get the build dependencies
 build_dep:
-	go run bin/get-github-release.go -use-api -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'
+	go run bin/get-github-release.go -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'

 # Get the release dependencies we only install on linux
 release_dep_linux:
@@ -25,6 +25,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
 * 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
 * Akamai Netstorage [:page_facing_up:](https://rclone.org/netstorage/)
 * Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
+* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
 * Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
 * ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
 * Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
@@ -4,6 +4,7 @@ package all
 import (
 	// Active file systems
 	_ "github.com/rclone/rclone/backend/alias"
+	_ "github.com/rclone/rclone/backend/amazonclouddrive"
 	_ "github.com/rclone/rclone/backend/azureblob"
 	_ "github.com/rclone/rclone/backend/azurefiles"
 	_ "github.com/rclone/rclone/backend/b2"
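The two hunks above re-add Amazon Drive to the README list and to the backend registration imports. The blank (`_`) import works because each rclone backend registers itself from an `init()` function, so importing the package for side effects is enough to make it available. A minimal sketch of that registration pattern, with illustrative names rather than rclone's actual internal API:

```go
package main

import "fmt"

// backends maps a backend name to its constructor; this stands in for
// the real registry rclone keeps internally (illustrative only).
var backends = map[string]func() string{}

// register is what each backend's init() would call.
func register(name string, factory func() string) {
	backends[name] = factory
}

// init runs as soon as this "backend" is linked into the binary, which
// is why a blank import (`_ "path/to/backend"`) is sufficient.
func init() {
	register("amazonclouddrive", func() string { return "acd backend" })
}

func main() {
	if factory, ok := backends["amazonclouddrive"]; ok {
		fmt.Println(factory())
	}
}
```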
File diff suppressed because it is too large
@@ -0,0 +1,21 @@
+// Test AmazonCloudDrive filesystem interface
+
+//go:build acd
+// +build acd
+
+package amazonclouddrive_test
+
+import (
+	"testing"
+
+	"github.com/rclone/rclone/backend/amazonclouddrive"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fstest/fstests"
+)
+
+// TestIntegration runs integration tests against the remote
+func TestIntegration(t *testing.T) {
+	fstests.NilObject = fs.Object((*amazonclouddrive.Object)(nil))
+	fstests.RemoteName = "TestAmazonCloudDrive:"
+	fstests.Run(t)
+}
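The restored test file above is gated behind a build tag, so it only compiles when explicitly requested (e.g. `go test -tags acd`). A small self-contained sketch of the same gating pattern; the file, package, and test names here are illustrative, not rclone's:

```go
//go:build acd
// +build acd

// The two directives above exclude this file unless the "acd" build tag
// is set; the second line is the pre-Go-1.17 spelling kept so older and
// newer toolchains agree on when the file is compiled.
package example_test

import "testing"

func TestOnlyWithTag(t *testing.T) {
	t.Log("compiled and run only when the acd build tag is set")
}
```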
@@ -402,24 +402,6 @@ rclone does if you know the container exists already.
 			Help:     `If set, do not do HEAD before GET when getting objects.`,
 			Default:  false,
 			Advanced: true,
-		}, {
-			Name: "delete_snapshots",
-			Help: `Set to specify how to deal with snapshots on blob deletion.`,
-			Examples: []fs.OptionExample{
-				{
-					Value: "",
-					Help:  "By default, the delete operation fails if a blob has snapshots",
-				}, {
-					Value: string(blob.DeleteSnapshotsOptionTypeInclude),
-					Help:  "Specify 'include' to remove the root blob and all its snapshots",
-				}, {
-					Value: string(blob.DeleteSnapshotsOptionTypeOnly),
-					Help:  "Specify 'only' to remove only the snapshots but keep the root blob.",
-				},
-			},
-			Default:   "",
-			Exclusive: true,
-			Advanced:  true,
 		}},
 	})
 }
@@ -456,7 +438,6 @@ type Options struct {
 	DirectoryMarkers bool   `config:"directory_markers"`
 	NoCheckContainer bool   `config:"no_check_container"`
 	NoHeadObject     bool   `config:"no_head_object"`
-	DeleteSnapshots  string `config:"delete_snapshots"`
 }

 // Fs represents a remote azure server
@@ -2375,10 +2356,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 // Remove an object
 func (o *Object) Remove(ctx context.Context) error {
 	blb := o.getBlobSVC()
-	opt := blob.DeleteOptions{}
-	if o.fs.opt.DeleteSnapshots != "" {
-		action := blob.DeleteSnapshotsOptionType(o.fs.opt.DeleteSnapshots)
-		opt.DeleteSnapshots = &action
+	//only := blob.DeleteSnapshotsOptionTypeOnly
+	opt := blob.DeleteOptions{
+		//DeleteSnapshots: &only,
 	}
 	return o.fs.pacer.Call(func() (bool, error) {
 		_, err := blb.Delete(ctx, &opt)
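The removed `delete_snapshots` option follows a common pattern: a free-form string config value is converted to the SDK's typed enum only when set, and passed by pointer so "unset" stays distinguishable from an explicit value. A minimal generic sketch under that assumption (types and names here are stand-ins, not the Azure SDK):

```go
package main

import "fmt"

// SnapshotMode stands in for a typed SDK enum such as
// blob.DeleteSnapshotsOptionType; the real type lives in the Azure SDK.
type SnapshotMode string

// deleteOptions mirrors an SDK options struct with an optional field:
// a nil pointer means "not specified".
type deleteOptions struct {
	DeleteSnapshots *SnapshotMode
}

func buildOptions(configured string) deleteOptions {
	opt := deleteOptions{}
	if configured != "" {
		mode := SnapshotMode(configured)
		opt.DeleteSnapshots = &mode // only set when the user chose a value
	}
	return opt
}

func main() {
	fmt.Printf("%+v\n", buildOptions("")) // DeleteSnapshots stays nil
	fmt.Println(*buildOptions("include").DeleteSnapshots)
}
```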
@@ -193,12 +193,9 @@ Example:
 			Advanced: true,
 		}, {
 			Name: "download_auth_duration",
-			Help: `Time before the public link authorization token will expire in s or suffix ms|s|m|h|d.
-
-This is used in combination with "rclone link" for making files
-accessible to the public and sets the duration before the download
-authorization token will expire.
+			Help: `Time before the authorization token will expire in s or suffix ms|s|m|h|d.
+
+The duration before the download authorization token will expire.
 The minimum value is 1 second. The maximum value is one week.`,
 			Default:  fs.Duration(7 * 24 * time.Hour),
 			Advanced: true,
@@ -1207,12 +1207,6 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
 	return err
 }

-// Shutdown shutdown the fs
-func (f *Fs) Shutdown(ctx context.Context) error {
-	f.tokenRenewer.Shutdown()
-	return nil
-}
-
 // ChangeNotify calls the passed function with a path that has had changes.
 // If the implementation uses polling, it should adhere to the given interval.
 //
@@ -1725,7 +1719,6 @@ var (
 	_ fs.DirCacheFlusher = (*Fs)(nil)
 	_ fs.PublicLinker    = (*Fs)(nil)
 	_ fs.CleanUpper      = (*Fs)(nil)
-	_ fs.Shutdowner      = (*Fs)(nil)
 	_ fs.Object          = (*Object)(nil)
 	_ fs.IDer            = (*Object)(nil)
 )
|
@ -428,15 +428,15 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||
members := []*team.UserSelectorArg{&user}
|
||||
args := team.NewMembersGetInfoArgs(members)
|
||||
|
||||
memberIDs, err := f.team.MembersGetInfo(args)
|
||||
memberIds, err := f.team.MembersGetInfo(args)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid dropbox team member: %q: %w", opt.Impersonate, err)
|
||||
}
|
||||
if len(memberIDs) == 0 || memberIDs[0].MemberInfo == nil || memberIDs[0].MemberInfo.Profile == nil {
|
||||
if len(memberIds) == 0 || memberIds[0].MemberInfo == nil || memberIds[0].MemberInfo.Profile == nil {
|
||||
return nil, fmt.Errorf("dropbox team member not found: %q", opt.Impersonate)
|
||||
}
|
||||
|
||||
cfg.AsMemberID = memberIDs[0].MemberInfo.Profile.MemberProfile.TeamMemberId
|
||||
cfg.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
|
||||
}
|
||||
|
||||
f.srv = files.New(cfg)
|
||||
|
@ -1231,7 +1231,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
|
|||
return nil, err
|
||||
}
|
||||
var total uint64
|
||||
used := q.Used
|
||||
var used = q.Used
|
||||
if q.Allocation != nil {
|
||||
if q.Allocation.Individual != nil {
|
||||
total += q.Allocation.Individual.Allocated
|
||||
|
|
|
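The `memberIDs` to `memberIds` change above is purely stylistic: both spellings compile identically, and Go style guidance prefers keeping initialisms uppercase (ID, URL, HTTP) in identifiers. Likewise `var used = q.Used` and `used := q.Used` are semantically identical declarations. An illustrative one-liner comparison:

```go
package main

import "fmt"

func main() {
	memberIds := []string{"a", "b"} // spelling flagged by golint-style linters
	memberIDs := memberIds          // preferred initialism spelling, same value
	var used = len(memberIDs)       // identical to: used := len(memberIDs)
	fmt.Println(used)
}
```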
@@ -970,8 +970,6 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
 	f.putFtpConnection(&c, err)
 	if errX := textprotoError(err); errX != nil {
 		switch errX.Code {
-		case ftp.StatusRequestedFileActionOK: // some ftp servers apparently return 250 instead of 257
-			err = nil // see: https://forum.rclone.org/t/rclone-pop-up-an-i-o-error-when-creating-a-folder-in-a-mounted-ftp-drive/44368/
 		case ftp.StatusFileUnavailable: // dir already exists: see issue #2181
 			err = nil
 		case 521: // dir already exists: error number according to RFC 959: issue #2363
@@ -56,7 +56,8 @@ type MediaItem struct {
 		CreationTime time.Time `json:"creationTime"`
 		Width        string    `json:"width"`
 		Height       string    `json:"height"`
-		Photo        struct{}  `json:"photo"`
+		Photo        struct {
+		} `json:"photo"`
 	} `json:"mediaMetadata"`
 	Filename string `json:"filename"`
 }
@@ -67,7 +68,7 @@ type MediaItems struct {
 	NextPageToken string `json:"nextPageToken"`
 }

-// Content categories
+//Content categories
 // NONE Default content category. This category is ignored when any other category is used in the filter.
 // LANDSCAPES Media items containing landscapes.
 // RECEIPTS Media items containing receipts.
@@ -186,5 +187,5 @@ type BatchCreateResponse struct {

 // BatchRemoveItems is for removing items from an album
 type BatchRemoveItems struct {
-	MediaItemIDs []string `json:"mediaItemIds"`
+	MediaItemIds []string `json:"mediaItemIds"`
 }
@@ -280,7 +280,7 @@ func errorHandler(resp *http.Response) error {
 	if strings.HasPrefix(resp.Header.Get("Content-Type"), "image/") {
 		body = []byte("Image not found or broken")
 	}
-	e := api.Error{
+	var e = api.Error{
 		Details: api.ErrorDetails{
 			Code:    resp.StatusCode,
 			Message: string(body),
@@ -702,7 +702,7 @@ func (f *Fs) createAlbum(ctx context.Context, albumTitle string) (album *api.Alb
 		Path:       "/albums",
 		Parameters: url.Values{},
 	}
-	request := api.CreateAlbum{
+	var request = api.CreateAlbum{
 		Album: &api.Album{
 			Title: albumTitle,
 		},
@@ -1002,7 +1002,7 @@ func (f *Fs) commitBatchAlbumID(ctx context.Context, items []uploadedItem, resul
 		Method: "POST",
 		Path:   "/mediaItems:batchCreate",
 	}
-	request := api.BatchCreateRequest{
+	var request = api.BatchCreateRequest{
 		AlbumID: albumID,
 	}
 	itemsInBatch := 0
@@ -1174,8 +1174,8 @@ func (o *Object) Remove(ctx context.Context) (err error) {
 		Path:       "/albums/" + album.ID + ":batchRemoveMediaItems",
 		NoResponse: true,
 	}
-	request := api.BatchRemoveItems{
-		MediaItemIDs: []string{o.id},
+	var request = api.BatchRemoveItems{
+		MediaItemIds: []string{o.id},
 	}
 	var resp *http.Response
 	err = o.fs.pacer.Call(func() (bool, error) {
@@ -762,12 +762,6 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	return nil
 }

-// Shutdown shutdown the fs
-func (f *Fs) Shutdown(ctx context.Context) error {
-	f.tokenRenewer.Shutdown()
-	return nil
-}
-
 // ------------------------------------------------------------

 // Fs returns the parent Fs.
@@ -1003,7 +997,6 @@ var (
 	_ fs.Copier     = (*Fs)(nil)
 	_ fs.Mover      = (*Fs)(nil)
 	_ fs.DirMover   = (*Fs)(nil)
-	_ fs.Shutdowner = (*Fs)(nil)
 	_ fs.Object     = (*Object)(nil)
 	_ fs.IDer       = (*Object)(nil)
 )
@@ -1680,12 +1680,6 @@ func (f *Fs) CleanUp(ctx context.Context) error {
 	return nil
 }

-// Shutdown shutdown the fs
-func (f *Fs) Shutdown(ctx context.Context) error {
-	f.tokenRenewer.Shutdown()
-	return nil
-}
-
 // Hashes returns the supported hash sets.
 func (f *Fs) Hashes() hash.Set {
 	return hash.Set(hash.MD5)
@@ -2110,7 +2104,6 @@ var (
 	_ fs.Abouter    = (*Fs)(nil)
 	_ fs.UserInfoer = (*Fs)(nil)
 	_ fs.CleanUpper = (*Fs)(nil)
-	_ fs.Shutdowner = (*Fs)(nil)
 	_ fs.Object     = (*Object)(nil)
 	_ fs.MimeTyper  = (*Object)(nil)
 	_ fs.Metadataer = (*Object)(nil)
@@ -1447,10 +1447,6 @@ func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string {
 	if runtime.GOOS == "windows" {
 		s = filepath.ToSlash(s)
 		vol := filepath.VolumeName(s)
-		if vol == `\\?` && len(s) >= 6 {
-			// `\\?\C:`
-			vol = s[:6]
-		}
 		s = vol + enc.FromStandardPath(s[len(vol):])
 		s = filepath.FromSlash(s)
 		if !noUNC {
@@ -1381,12 +1381,6 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (

 }

-// Shutdown shutdown the fs
-func (f *Fs) Shutdown(ctx context.Context) error {
-	f.tokenRenewer.Shutdown()
-	return nil
-}
-
 // Creates from the parameters passed in a half finished Object which
 // must have setMetaData called on it
 //
@@ -2765,7 +2759,6 @@ var (
 	_ fs.PublicLinker = (*Fs)(nil)
 	_ fs.CleanUpper   = (*Fs)(nil)
 	_ fs.ListRer      = (*Fs)(nil)
-	_ fs.Shutdowner   = (*Fs)(nil)
 	_ fs.Object       = (*Object)(nil)
 	_ fs.MimeTyper    = &Object{}
 	_ fs.IDer         = &Object{}
@@ -1,10 +1,10 @@
 package quickxorhash

 import (
-	"crypto/rand"
 	"encoding/base64"
 	"fmt"
 	"hash"
+	"math/rand"
 	"testing"

 	"github.com/stretchr/testify/assert"
@@ -171,9 +171,7 @@ var _ hash.Hash = (*quickXorHash)(nil)
 func BenchmarkQuickXorHash(b *testing.B) {
 	b.SetBytes(1 << 20)
 	buf := make([]byte, 1<<20)
-	n, err := rand.Read(buf)
-	require.NoError(b, err)
-	require.Equal(b, len(buf), n)
+	rand.Read(buf)
 	h := New()
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
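The difference the hunk above reverts: `crypto/rand.Read` returns an `(n, err)` pair that callers should check, while the package-level `math/rand.Read` (deprecated since Go 1.20) effectively never fails, so the branch version can ignore its result. A sketch of the checked variant:

```go
package main

import (
	crand "crypto/rand"
	"log"
)

func main() {
	buf := make([]byte, 1<<20)
	n, err := crand.Read(buf) // cryptographically secure source, may error
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("filled %d bytes", n)
}
```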
@@ -7,15 +7,12 @@ import (
 	"context"
 	"fmt"
 	"sort"
-	"strconv"
 	"strings"
-	"sync"
 	"time"

 	"github.com/oracle/oci-go-sdk/v65/common"
 	"github.com/oracle/oci-go-sdk/v65/objectstorage"
 	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/operations"
 )

 // ------------------------------------------------------------
@@ -26,7 +23,6 @@ const (
 	operationRename        = "rename"
 	operationListMultiPart = "list-multipart-uploads"
 	operationCleanup       = "cleanup"
-	operationRestore       = "restore"
 )

 var commandHelp = []fs.CommandHelp{{
@@ -81,42 +77,6 @@ Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
 	Opts: map[string]string{
 		"max-age": "Max age of upload to delete",
 	},
-}, {
-	Name:  operationRestore,
-	Short: "Restore objects from Archive to Standard storage",
-	Long: `This command can be used to restore one or more objects from Archive to Standard storage.
-
-Usage Examples:
-
-    rclone backend restore oos:bucket/path/to/directory -o hours=HOURS
-    rclone backend restore oos:bucket -o hours=HOURS
-
-This flag also obeys the filters. Test first with --interactive/-i or --dry-run flags
-
-    rclone --interactive backend restore --include "*.txt" oos:bucket/path -o hours=72
-
-All the objects shown will be marked for restore, then
-
-    rclone backend restore --include "*.txt" oos:bucket/path -o hours=72
-
-It returns a list of status dictionaries with Object Name and Status
-keys. The Status will be "RESTORED"" if it was successful or an error message
-if not.
-
-    [
-        {
-            "Object": "test.txt"
-            "Status": "RESTORED",
-        },
-        {
-            "Object": "test/file4.txt"
-            "Status": "RESTORED",
-        }
-    ]
-`,
-	Opts: map[string]string{
-		"hours": "The number of hours for which this object will be restored. Default is 24 hrs.",
-	},
-},
 }
@@ -153,8 +113,6 @@ func (f *Fs) Command(ctx context.Context, commandName string, args []string,
 			}
 		}
 		return nil, f.cleanUp(ctx, maxAge)
-	case operationRestore:
-		return f.restore(ctx, opt)
 	default:
 		return nil, fs.ErrorCommandNotFound
 	}
@@ -332,63 +290,3 @@ func (f *Fs) listMultipartUploadParts(ctx context.Context, bucketName, bucketPat
 	}
 	return uploadedParts, nil
 }
-
-func (f *Fs) restore(ctx context.Context, opt map[string]string) (interface{}, error) {
-	req := objectstorage.RestoreObjectsRequest{
-		NamespaceName:         common.String(f.opt.Namespace),
-		RestoreObjectsDetails: objectstorage.RestoreObjectsDetails{},
-	}
-	if hours := opt["hours"]; hours != "" {
-		ihours, err := strconv.Atoi(hours)
-		if err != nil {
-			return nil, fmt.Errorf("bad value for hours: %w", err)
-		}
-		req.RestoreObjectsDetails.Hours = &ihours
-	}
-	type status struct {
-		Object string
-		Status string
-	}
-	var (
-		outMu sync.Mutex
-		out   = []status{}
-		err   error
-	)
-	err = operations.ListFn(ctx, f, func(obj fs.Object) {
-		// Remember this is run --checkers times concurrently
-		o, ok := obj.(*Object)
-		st := status{Object: obj.Remote(), Status: "RESTORED"}
-		defer func() {
-			outMu.Lock()
-			out = append(out, st)
-			outMu.Unlock()
-		}()
-		if !ok {
-			st.Status = "Not an OCI Object Storage object"
-			return
-		}
-		if o.storageTier == nil || (*o.storageTier != "archive") {
-			st.Status = "Object not in Archive storage tier"
-			return
-		}
-		if operations.SkipDestructive(ctx, obj, "restore") {
-			return
-		}
-		bucket, bucketPath := o.split()
-		reqCopy := req
-		reqCopy.BucketName = &bucket
-		reqCopy.ObjectName = &bucketPath
-		var response objectstorage.RestoreObjectsResponse
-		err = f.pacer.Call(func() (bool, error) {
-			response, err = f.srv.RestoreObjects(ctx, reqCopy)
-			return shouldRetry(ctx, response.HTTPResponse(), err)
-		})
-		if err != nil {
-			st.Status = err.Error()
-		}
-	})
-	if err != nil {
-		return out, err
-	}
-	return out, nil
-}
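The removed restore command shows a pattern worth noting: a callback run concurrently (rclone runs it up to `--checkers` times in parallel) appends per-object results to a shared slice guarded by a mutex. A minimal self-contained sketch of that pattern, with goroutines standing in for the concurrent `ListFn` callbacks:

```go
package main

import (
	"fmt"
	"sync"
)

type status struct {
	Object string
	Status string
}

func main() {
	var (
		outMu sync.Mutex
		out   []status
		wg    sync.WaitGroup
	)
	for _, name := range []string{"a.txt", "b.txt", "c.txt"} {
		wg.Add(1)
		go func(name string) { // stands in for one concurrent callback
			defer wg.Done()
			st := status{Object: name, Status: "RESTORED"}
			outMu.Lock() // serialize appends to the shared result slice
			out = append(out, st)
			outMu.Unlock()
		}(name)
	}
	wg.Wait()
	fmt.Println(len(out), "results collected")
}
```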
@@ -948,12 +948,6 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 	return usage, nil
 }

-// Shutdown shutdown the fs
-func (f *Fs) Shutdown(ctx context.Context) error {
-	f.tokenRenewer.Shutdown()
-	return nil
-}
-
 // Hashes returns the supported hash sets.
 func (f *Fs) Hashes() hash.Set {
 	// EU region supports SHA1 and SHA256 (but rclone doesn't
@@ -1286,7 +1280,6 @@ var (
 	_ fs.DirCacheFlusher = (*Fs)(nil)
 	_ fs.PublicLinker    = (*Fs)(nil)
 	_ fs.Abouter         = (*Fs)(nil)
-	_ fs.Shutdowner      = (*Fs)(nil)
 	_ fs.Object          = (*Object)(nil)
 	_ fs.IDer            = (*Object)(nil)
 )
@@ -71,11 +71,12 @@ type Error struct {

 // ErrorDetails contains further details of api error
 type ErrorDetails struct {
-	Type     string   `json:"@type,omitempty"`
-	Reason   string   `json:"reason,omitempty"`
-	Domain   string   `json:"domain,omitempty"`
-	Metadata struct{} `json:"metadata,omitempty"` // TODO: undiscovered yet
-	Locale   string   `json:"locale,omitempty"`   // e.g. "en"
+	Type   string `json:"@type,omitempty"`
+	Reason string `json:"reason,omitempty"`
+	Domain string `json:"domain,omitempty"`
+	Metadata struct {
+	} `json:"metadata,omitempty"` // TODO: undiscovered yet
+	Locale       string        `json:"locale,omitempty"` // e.g. "en"
 	Message      string        `json:"message,omitempty"`
 	StackEntries []interface{} `json:"stack_entries,omitempty"` // TODO: undiscovered yet
 	Detail       string        `json:"detail,omitempty"`
@@ -265,11 +266,13 @@ type FileApp struct {
 	NeedMoreQuota bool   `json:"need_more_quota,omitempty"`
 	IconLink      string `json:"icon_link,omitempty"`
 	IsDefault     bool   `json:"is_default,omitempty"`
-	Params        struct{}      `json:"params,omitempty"` // TODO
-	CategoryIDs   []interface{} `json:"category_ids,omitempty"`
-	AdSceneType   int           `json:"ad_scene_type,omitempty"`
-	Space         string        `json:"space,omitempty"`
-	Links         struct{}      `json:"links,omitempty"` // TODO
+	Params struct {
+	} `json:"params,omitempty"` // TODO
+	CategoryIds []interface{} `json:"category_ids,omitempty"`
+	AdSceneType int           `json:"ad_scene_type,omitempty"`
+	Space       string        `json:"space,omitempty"`
+	Links struct {
+	} `json:"links,omitempty"` // TODO
 }

 // ------------------------------------------------------------
@@ -381,10 +384,11 @@ type NewTask struct {

 // About informs drive status
 type About struct {
-	Kind      string   `json:"kind,omitempty"` // "drive#about"
-	Quota     *Quota   `json:"quota,omitempty"`
-	ExpiresAt string   `json:"expires_at,omitempty"`
-	Quotas    struct{} `json:"quotas,omitempty"` // maybe []*Quota?
+	Kind      string `json:"kind,omitempty"` // "drive#about"
+	Quota     *Quota `json:"quota,omitempty"`
+	ExpiresAt string `json:"expires_at,omitempty"`
+	Quotas struct {
+	} `json:"quotas,omitempty"` // maybe []*Quota?
 }

 // Quota informs drive quota
@@ -458,7 +462,7 @@ type DecompressResult struct {

 // RequestShare is to request for file share
 type RequestShare struct {
-	FileIDs        []string `json:"file_ids,omitempty"`
+	FileIds        []string `json:"file_ids,omitempty"`
 	ShareTo        string   `json:"share_to,omitempty"`        // "publiclink",
 	ExpirationDays int      `json:"expiration_days,omitempty"` // -1 = 'forever'
 	PassCodeOption string   `json:"pass_code_option,omitempty"` // "NOT_REQUIRED"
@@ -466,7 +470,7 @@ type RequestShare struct {

 // RequestBatch is to request for batch actions
 type RequestBatch struct {
-	IDs []string          `json:"ids,omitempty"`
+	Ids []string          `json:"ids,omitempty"`
 	To  map[string]string `json:"to,omitempty"`
 }
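Several hunks above change only formatting: `struct{}` on one line and a `struct {` ... `}` spanning two lines denote the same empty type, so the JSON (de)serialization behavior is unchanged. A quick illustrative check:

```go
package main

import "fmt"

type a struct {
	Photo struct{} `json:"photo"`
}

type b struct {
	Photo struct {
	} `json:"photo"`
}

func main() {
	// Both are comparable structs with an empty-struct field; only the
	// source formatting differs.
	fmt.Println(a{} == a{}, b{} == b{})
}
```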
@@ -775,7 +775,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 		expiry = int(math.Ceil(time.Duration(expire).Hours() / 24))
 	}
 	req := api.RequestShare{
-		FileIDs:        []string{id},
+		FileIds:        []string{id},
 		ShareTo:        "publiclink",
 		ExpirationDays: expiry,
 		PassCodeOption: "NOT_REQUIRED",
@@ -797,7 +797,7 @@ func (f *Fs) deleteObjects(ctx context.Context, IDs []string, useTrash bool) (er
 		action = "batchTrash"
 	}
 	req := api.RequestBatch{
-		IDs: IDs,
+		Ids: IDs,
 	}
 	if err := f.requestBatchAction(ctx, action, &req); err != nil {
 		return fmt.Errorf("delete object failed: %w", err)
@@ -817,7 +817,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
 		return err
 	}

-	trashedFiles := false
+	var trashedFiles = false
 	if check {
 		found, err := f.listAll(ctx, rootID, "", "", func(item *api.File) bool {
 			if !item.Trashed {
@@ -893,7 +893,7 @@ func (f *Fs) moveObjects(ctx context.Context, IDs []string, dirID string) (err e
 		return nil
 	}
 	req := api.RequestBatch{
-		IDs: IDs,
+		Ids: IDs,
 		To:  map[string]string{"parent_id": parentIDForRequest(dirID)},
 	}
 	if err := f.requestBatchAction(ctx, "batchMove", &req); err != nil {
@@ -1039,7 +1039,7 @@ func (f *Fs) copyObjects(ctx context.Context, IDs []string, dirID string) (err e
 		return nil
 	}
 	req := api.RequestBatch{
-		IDs: IDs,
+		Ids: IDs,
 		To:  map[string]string{"parent_id": parentIDForRequest(dirID)},
 	}
 	if err := f.requestBatchAction(ctx, "batchCopy", &req); err != nil {
@@ -770,12 +770,6 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 	return o.(*Object).url, nil
 }

-// Shutdown shutdown the fs
-func (f *Fs) Shutdown(ctx context.Context) error {
-	f.tokenRenewer.Shutdown()
-	return nil
-}
-
 // About gets quota information
 func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 	var resp *http.Response
@@ -1116,7 +1110,6 @@ var (
 	_ fs.DirCacheFlusher = (*Fs)(nil)
 	_ fs.Abouter         = (*Fs)(nil)
 	_ fs.PublicLinker    = (*Fs)(nil)
-	_ fs.Shutdowner      = (*Fs)(nil)
 	_ fs.Object          = (*Object)(nil)
 	_ fs.MimeTyper       = (*Object)(nil)
 	_ fs.IDer            = (*Object)(nil)
@@ -1198,12 +1198,11 @@ func (o *Object) uploadSession(ctx context.Context, parentID, name string) (uplo

 func (o *Object) upload(ctx context.Context, uploadKey string, chunk io.Reader, fullSize int64, offset int64, chunkSize int64, options ...fs.OpenOption) (err error) {
 	opts := rest.Opts{
-		Method:        "POST",
-		RootURL:       fmt.Sprintf(uploadURL, o.fs.opt.Host) + uploadKey,
-		Body:          chunk,
-		ContentLength: &chunkSize,
-		ContentRange:  fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, fullSize),
-		Options:       options,
+		Method:       "POST",
+		RootURL:      fmt.Sprintf(uploadURL, o.fs.opt.Host) + uploadKey,
+		Body:         chunk,
+		ContentRange: fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize, fullSize),
+		Options:      options,
 	}

 	var fileID string
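The hunk above is, among other things, an off-by-one fix: the HTTP `Content-Range` header uses an inclusive end byte, so a chunk of `chunkSize` bytes starting at `offset` must be advertised as `offset` through `offset+chunkSize-1`. A quick check:

```go
package main

import "fmt"

func contentRange(offset, chunkSize, fullSize int64) string {
	last := offset + chunkSize - 1 // inclusive last byte of this chunk
	return fmt.Sprintf("bytes %d-%d/%d", offset, last, fullSize)
}

func main() {
	// A 10-byte chunk at the start of a 100-byte file covers bytes 0-9.
	fmt.Println(contentRange(0, 10, 100)) // bytes 0-9/100
}
```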
@@ -61,7 +61,6 @@ import (
 	"github.com/rclone/rclone/lib/rest"
 	"github.com/rclone/rclone/lib/version"
 	"golang.org/x/net/http/httpguts"
-	"golang.org/x/sync/errgroup"
 )

 // The S3 providers
@@ -2186,10 +2185,10 @@ If empty it will default to the environment variable "AWS_PROFILE" or
 			Sensitive: true,
 		}, {
 			Name: "upload_concurrency",
-			Help: `Concurrency for multipart uploads and copies.
+			Help: `Concurrency for multipart uploads.

 This is the number of chunks of the same file that are uploaded
-concurrently for multipart uploads and copies.
+concurrently.

 If you are uploading small numbers of large files over high-speed links
 and these uploads do not fully utilize your bandwidth, then increasing
@@ -2220,13 +2219,6 @@ If it is set then rclone will use v2 authentication.
 Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH.`,
 			Default:  false,
 			Advanced: true,
-		}, {
-			Name: "use_dual_stack",
-			Help: `If true use AWS S3 dual-stack endpoint (IPv6 support).
-
-See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html)`,
-			Default:  false,
-			Advanced: true,
 		}, {
 			Name:     "use_accelerate_endpoint",
 			Provider: "AWS",
@@ -2434,19 +2426,6 @@ See [the time option docs](/docs/#time-option) for valid formats.
 `,
 			Default:  fs.Time{},
 			Advanced: true,
-		}, {
-			Name: "version_deleted",
-			Help: `Show deleted file markers when using versions.
-
-This shows deleted file markers in the listing when using versions. These will appear
-as 0 size files. The only operation which can be performed on them is deletion.
-
-Deleting a delete marker will reveal the previous version.
-
-Deleted files will always show with a timestamp.
-`,
-			Default:  false,
-			Advanced: true,
 		}, {
 			Name: "decompress",
 			Help: `If set this will decompress gzip encoded objects.
@@ -2636,7 +2615,6 @@ type Options struct {
 	Region             string `config:"region"`
 	Endpoint           string `config:"endpoint"`
 	STSEndpoint        string `config:"sts_endpoint"`
-	UseDualStack       bool   `config:"use_dual_stack"`
 	LocationConstraint string `config:"location_constraint"`
 	ACL                string `config:"acl"`
 	BucketACL          string `config:"bucket_acl"`
@@ -2675,7 +2653,6 @@ type Options struct {
 	UsePresignedRequest   bool        `config:"use_presigned_request"`
 	Versions              bool        `config:"versions"`
 	VersionAt             fs.Time     `config:"version_at"`
-	VersionDeleted        bool        `config:"version_deleted"`
 	Decompress            bool        `config:"decompress"`
 	MightGzip             fs.Tristate `config:"might_gzip"`
 	UseAcceptEncodingGzip fs.Tristate `config:"use_accept_encoding_gzip"`
@@ -2965,9 +2942,6 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S
 		r.addService("sts", opt.STSEndpoint)
 		awsConfig.WithEndpointResolver(r)
 	}
-	if opt.UseDualStack {
-		awsConfig.UseDualStackEndpoint = endpoints.DualStackEndpointStateEnabled
-	}

 	// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
 	awsSessionOpts := session.Options{
@@ -3222,10 +3196,6 @@ func setQuirks(opt *Options) {
 		// https://github.com/rclone/rclone/issues/6670
 		useAcceptEncodingGzip = false
 		useAlreadyExists = true // returns BucketNameUnavailable instead of BucketAlreadyExists but good enough!
-		// GCS S3 doesn't support multi-part server side copy:
-		// See: https://issuetracker.google.com/issues/323465186
-		// So make cutoff very large which it does seem to support
-		opt.CopyCutoff = math.MaxInt64
 	default:
 		fs.Logf("s3", "s3 provider %q not known - please set correctly", opt.Provider)
 		fallthrough
@@ -3450,7 +3420,6 @@ func (f *Fs) getMetaDataListing(ctx context.Context, wantRemote string) (info *s
 		withVersions: f.opt.Versions,
 		findFile:     true,
 		versionAt:    f.opt.VersionAt,
-		hidden:       f.opt.VersionDeleted,
 	}, func(gotRemote string, object *s3.Object, objectVersionID *string, isDirectory bool) error {
 		if isDirectory {
 			return nil
@@ -3512,10 +3481,6 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *s3.Obje
 		o.bytes = aws.Int64Value(info.Size)
 		o.storageClass = stringClonePointer(info.StorageClass)
 		o.versionID = stringClonePointer(versionID)
-		// If is delete marker, show that metadata has been read as there is none to read
-		if info.Size == isDeleteMarker {
-			o.meta = map[string]string{}
-		}
 	} else if !o.fs.opt.NoHeadObject {
 		err := o.readMetaData(ctx) // reads info and meta, returning an error
 		if err != nil {
@@ -3813,7 +3778,7 @@ func (ls *versionsList) List(ctx context.Context) (resp *s3.ListObjectsV2Output,
 		//structs.SetFrom(obj, objVersion)
 		setFrom_s3Object_s3ObjectVersion(obj, objVersion)
 		// Adjust the file names
-		if !ls.usingVersionAt && (!aws.BoolValue(objVersion.IsLatest) || objVersion.Size == isDeleteMarker) {
+		if !ls.usingVersionAt && !aws.BoolValue(objVersion.IsLatest) {
 			if obj.Key != nil && objVersion.LastModified != nil {
 				*obj.Key = version.Add(*obj.Key, *objVersion.LastModified)
 			}
@@ -4081,7 +4046,6 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
 		addBucket:    addBucket,
 		withVersions: f.opt.Versions,
 		versionAt:    f.opt.VersionAt,
-		hidden:       f.opt.VersionDeleted,
 	}, func(remote string, object *s3.Object, versionID *string, isDirectory bool) error {
 		entry, err := f.itemToDirEntry(ctx, remote, object, versionID, isDirectory)
 		if err != nil {
@@ -4168,7 +4132,6 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 		recurse:      true,
 		withVersions: f.opt.Versions,
 		versionAt:    f.opt.VersionAt,
-		hidden:       f.opt.VersionDeleted,
 	}, func(remote string, object *s3.Object, versionID *string, isDirectory bool) error {
 		entry, err := f.itemToDirEntry(ctx, remote, object, versionID, isDirectory)
 		if err != nil {
@@ -4512,20 +4475,10 @@ func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dst

 	fs.Debugf(src, "Starting multipart copy with %d parts", numParts)

-	var (
-		parts   = make([]*s3.CompletedPart, numParts)
-		g, gCtx = errgroup.WithContext(ctx)
-	)
-	g.SetLimit(f.opt.UploadConcurrency)
+	var parts []*s3.CompletedPart
 	for partNum := int64(1); partNum <= numParts; partNum++ {
-		// Fail fast, in case an errgroup managed function returns an error
-		// gCtx is cancelled. There is no point in uploading all the other parts.
-		if gCtx.Err() != nil {
-			break
-		}
-		partNum := partNum // for closure
-		g.Go(func() error {
-			var uout *s3.UploadPartCopyOutput
+		if err := f.pacer.Call(func() (bool, error) {
+			partNum := partNum
 			uploadPartReq := &s3.UploadPartCopyInput{}
 			//structs.SetFrom(uploadPartReq, copyReq)
 			setFrom_s3UploadPartCopyInput_s3CopyObjectInput(uploadPartReq, copyReq)
@@ -4534,24 +4487,18 @@ func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dst
 			uploadPartReq.PartNumber = &partNum
 			uploadPartReq.UploadId = uid
 			uploadPartReq.CopySourceRange = aws.String(calculateRange(partSize, partNum-1, numParts, srcSize))
-			err := f.pacer.Call(func() (bool, error) {
-				uout, err = f.c.UploadPartCopyWithContext(gCtx, uploadPartReq)
-				return f.shouldRetry(gCtx, err)
-			})
+			uout, err := f.c.UploadPartCopyWithContext(ctx, uploadPartReq)
 			if err != nil {
-				return err
+				return f.shouldRetry(ctx, err)
 			}
-			parts[partNum-1] = &s3.CompletedPart{
+			parts = append(parts, &s3.CompletedPart{
 				PartNumber: &partNum,
 				ETag:       uout.CopyPartResult.ETag,
-			}
-			return nil
-		})
-	}
-
-	err = g.Wait()
-	if err != nil {
-		return err
+			})
+			return false, nil
+		}); err != nil {
+			return err
+		}
 	}

 	return f.pacer.Call(func() (bool, error) {
@@ -4951,7 +4898,6 @@ func (f *Fs) restoreStatus(ctx context.Context, all bool) (out []restoreStatusOu
 		recurse:       true,
 		withVersions:  f.opt.Versions,
 		versionAt:     f.opt.VersionAt,
-		hidden:        f.opt.VersionDeleted,
 		restoreStatus: true,
 	}, func(remote string, object *s3.Object, versionID *string, isDirectory bool) error {
 		entry, err := f.itemToDirEntry(ctx, remote, object, versionID, isDirectory)
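The `copyMultipart` hunks above replace a bounded-concurrency errgroup implementation with a sequential loop. A minimal sketch of the errgroup pattern the branch loses: up to `SetLimit` goroutines run at once, the first error cancels the rest via the group context, and each goroutine writes into its own pre-sized slot so no mutex is needed.

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	const numParts = 8
	parts := make([]int, numParts) // one slot per part, no locking needed

	g, gCtx := errgroup.WithContext(context.Background())
	g.SetLimit(4) // at most 4 concurrent "part copies"
	for partNum := 1; partNum <= numParts; partNum++ {
		if gCtx.Err() != nil {
			break // fail fast once any part has errored
		}
		partNum := partNum // capture for the closure
		g.Go(func() error {
			parts[partNum-1] = partNum // each goroutine owns one slot
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("copy failed:", err)
		return
	}
	fmt.Println("completed parts:", parts)
}
```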
@@ -2,9 +2,6 @@
 package s3

 import (
-	"context"
-	"net/http"
-	"strings"
 	"testing"

 	"github.com/rclone/rclone/fs"
@@ -12,13 +9,6 @@ import (
 	"github.com/rclone/rclone/fstest/fstests"
 )

-func SetupS3Test(t *testing.T) (context.Context, *Options, *http.Client) {
-	ctx, opt := context.Background(), new(Options)
-	opt.Provider = "AWS"
-	client := getClient(ctx, opt)
-	return ctx, opt, client
-}
-
 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
@@ -49,28 +39,6 @@ func TestIntegration2(t *testing.T) {
 	})
 }

-func TestAWSDualStackOption(t *testing.T) {
-	{
-		// test enabled
-		ctx, opt, client := SetupS3Test(t)
-		opt.UseDualStack = true
-		s3Conn, _, _ := s3Connection(ctx, opt, client)
-		if !strings.Contains(s3Conn.Endpoint, "dualstack") {
-			t.Errorf("dualstack failed got: %s, wanted: dualstack", s3Conn.Endpoint)
-			t.Fail()
-		}
-	}
-	{
-		// test default case
-		ctx, opt, client := SetupS3Test(t)
-		s3Conn, _, _ := s3Connection(ctx, opt, client)
-		if strings.Contains(s3Conn.Endpoint, "dualstack") {
-			t.Errorf("dualstack failed got: %s, NOT wanted: dualstack", s3Conn.Endpoint)
-			t.Fail()
-		}
-	}
-}
-
 func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
 	return f.setUploadChunkSize(cs)
 }
@@ -577,7 +577,7 @@ func (f *Fs) getDownloadLink(ctx context.Context, libraryID, filePath string) (s
 	return result, nil
 }

-func (f *Fs) download(ctx context.Context, downloadLink string, size int64, options ...fs.OpenOption) (io.ReadCloser, error) {
+func (f *Fs) download(ctx context.Context, url string, size int64, options ...fs.OpenOption) (io.ReadCloser, error) {
 	// Check if we need to download partial content
 	var start, end int64 = 0, size
 	partialContent := false
@@ -606,18 +606,11 @@ func (f *Fs) download(ctx context.Context, downloadLink string, size int64, opti
 	// Build the http request
 	opts := rest.Opts{
 		Method:  "GET",
+		RootURL: url,
 		Options: options,
 	}
-	parsedURL, err := url.Parse(downloadLink)
-	if err != nil {
-		return nil, fmt.Errorf("failed to parse download url: %w", err)
-	}
-	if parsedURL.IsAbs() {
-		opts.RootURL = downloadLink
-	} else {
-		opts.Path = downloadLink
-	}
 	var resp *http.Response
+	var err error
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.Call(ctx, &opts)
 		return f.shouldRetry(ctx, resp, err)
@@ -625,7 +618,7 @@ func (f *Fs) download(ctx context.Context, downloadLink string, size int64, opti
 	if err != nil {
 		if resp != nil {
 			if resp.StatusCode == 404 {
-				return nil, fmt.Errorf("file not found '%s'", downloadLink)
+				return nil, fmt.Errorf("file not found '%s'", url)
 			}
 		}
 		return nil, err
@@ -695,19 +688,11 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, uploadLink, filePath stri

 	opts := rest.Opts{
 		Method:      "POST",
+		RootURL:     uploadLink,
 		Body:        formReader,
 		ContentType: contentType,
 		Parameters:  url.Values{"ret-json": {"1"}}, // It needs to be on the url, not in the body parameters
 	}
-	parsedURL, err := url.Parse(uploadLink)
-	if err != nil {
-		return nil, fmt.Errorf("failed to parse upload url: %w", err)
-	}
-	if parsedURL.IsAbs() {
-		opts.RootURL = uploadLink
-	} else {
-		opts.Path = uploadLink
-	}
 	result := make([]api.FileDetail, 1)
 	var resp *http.Response
 	// If an error occurs during the call, do not attempt to retry: The upload link is single use only
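The master side of the hunks above handles servers that return either full URLs or server-relative paths for download/upload links, branching on `url.IsAbs()`. A self-contained sketch of that classification (the seafile-style URLs are illustrative):

```go
package main

import (
	"fmt"
	"net/url"
)

// classify decides whether a link should be used as an absolute root
// URL or as a path resolved against the configured host.
func classify(link string) (rootURL, path string, err error) {
	parsed, err := url.Parse(link)
	if err != nil {
		return "", "", fmt.Errorf("failed to parse url: %w", err)
	}
	if parsed.IsAbs() { // has a scheme, e.g. https://...
		return link, "", nil
	}
	return "", link, nil // relative: resolve against the base host
}

func main() {
	fmt.Println(classify("https://seafile.example.com/seafhttp/files/abc"))
	fmt.Println(classify("/seafhttp/files/abc"))
}
```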
@@ -10,7 +10,6 @@ import (
 	"io"
 	"os/exec"
 	"strings"
-	"time"

 	"github.com/rclone/rclone/fs"
 )
@@ -94,7 +93,8 @@ func (f *Fs) newSSHSessionExternal() *sshSessionExternal {
 	s.cmd = exec.CommandContext(ctx, ssh[0], ssh[1:]...)

 	// Allow the command a short time only to shut down
-	s.cmd.WaitDelay = time.Second
+	// FIXME enable when we get rid of go1.19
+	// s.cmd.WaitDelay = time.Second

 	return s
 }
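Context for the hunk above: `exec.Cmd.WaitDelay` was added in Go 1.20, so a branch that still builds with go1.19 has to keep the call commented out (hence the FIXME). With `WaitDelay` set, `Wait` gives the process that long to exit after its context is cancelled before killing it. A small sketch, assuming a Go 1.20+ toolchain and a Unix `sleep` binary:

```go
package main

import (
	"context"
	"os/exec"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	cmd := exec.CommandContext(ctx, "sleep", "60")
	cmd.WaitDelay = time.Second // allow one second to shut down cleanly
	_ = cmd.Run()               // killed shortly after the context expires
}
```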
@@ -1176,12 +1176,6 @@ func (f *Fs) DirCacheFlush() {
 	f.dirCache.ResetRoot()
 }

-// Shutdown shutdown the fs
-func (f *Fs) Shutdown(ctx context.Context) error {
-	f.tokenRenewer.Shutdown()
-	return nil
-}
-
 // Hashes returns the supported hash sets.
 func (f *Fs) Hashes() hash.Set {
 	return hash.Set(hash.MD5)
@@ -1472,7 +1466,6 @@ var (
 	_ fs.Copier = (*Fs)(nil)
 	// _ fs.PutStreamer = (*Fs)(nil)
 	_ fs.DirCacheFlusher = (*Fs)(nil)
-	_ fs.Shutdowner      = (*Fs)(nil)
 	_ fs.Object          = (*Object)(nil)
 	_ fs.IDer            = (*Object)(nil)
 )
@@ -75,7 +75,6 @@ type Prop struct {
 	Size        int64    `xml:"DAV: prop>getcontentlength,omitempty"`
 	Modified    Time     `xml:"DAV: prop>getlastmodified,omitempty"`
 	Checksums   []string `xml:"prop>checksums>checksum,omitempty"`
-	Permissions string   `xml:"prop>permissions,omitempty"`
 	MESha1Hex   *string  `xml:"ME: prop>sha1hex,omitempty"` // Fastmail-specific sha1 checksum
 }
@@ -149,11 +149,6 @@ Set to 0 to disable chunked uploading.
 `,
 			Advanced: true,
 			Default:  10 * fs.Mebi, // Default NextCloud `max_chunk_size` is `10 MiB`. See https://github.com/nextcloud/server/blob/0447b53bda9fe95ea0cbed765aa332584605d652/apps/files/lib/App.php#L57
-		}, {
-			Name:     "owncloud_exclude_shares",
-			Help:     "Exclude ownCloud shares",
-			Advanced: true,
-			Default:  false,
 		}},
 	})
 }
@@ -170,7 +165,6 @@ type Options struct {
 	Headers       fs.CommaSepList `config:"headers"`
 	PacerMinSleep fs.Duration     `config:"pacer_min_sleep"`
 	ChunkSize     fs.SizeSuffix   `config:"nextcloud_chunk_size"`
-	ExcludeShares bool            `config:"owncloud_exclude_shares"`
 }

 // Fs represents a remote webdav
@@ -708,7 +702,6 @@ var owncloudProps = []byte(`<?xml version="1.0"?>
 <d:resourcetype />
 <d:getcontenttype />
 <oc:checksums />
-<oc:permissions />
 </d:prop>
</d:propfind>
`)
@@ -804,11 +797,6 @@ func (f *Fs) listAll(ctx context.Context, dir string, directoriesOnly bool, file
 				continue
 			}
 		}
-		if f.opt.ExcludeShares {
-			if strings.Contains(item.Props.Permissions, "S") {
-				continue
-			}
-		}
 		// item.Name = restoreReservedChars(item.Name)
 		if fn(remote, isDir, &item.Props) {
 			found = true
@@ -23,6 +23,8 @@ CATEGORY = re.compile(r"(^[\w/ ]+(?:, *[\w/ ]+)*):\s*(.*)$")
 backends = [ x for x in os.listdir("backend") if x != "all"]

 backend_aliases = {
+    "amazon cloud drive" : "amazonclouddrive",
+    "acd" : "amazonclouddrive",
     "google cloud storage" : "googlecloudstorage",
     "gcs" : "googlecloudstorage",
     "azblob" : "azureblob",
@@ -32,6 +34,7 @@ backend_aliases = {
 }

 backend_titles = {
+    "amazonclouddrive": "Amazon Cloud Drive",
     "googlecloudstorage": "Google Cloud Storage",
     "azureblob": "Azure Blob",
     "ftp": "FTP",
@@ -30,6 +30,7 @@ docs = [
     # Keep these alphabetical by full name
     "fichier.md",
     "alias.md",
+    "amazonclouddrive.md",
     "s3.md",
     "b2.md",
     "box.md",
@@ -121,7 +122,6 @@ ignore_docs = [
     "downloads.md",
     "privacy.md",
     "sponsor.md",
-    "amazonclouddrive.md",
 ]

 def read_doc(doc):
@@ -2,19 +2,16 @@
 package bilib

 import (
-	"context"
 	"os"
-	"path/filepath"
 	"regexp"
 	"runtime"
 	"strings"

 	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/operations"
 )

 // FsPath converts Fs to a suitable rclone argument
-func FsPath(f fs.Info) string {
+func FsPath(f fs.Fs) string {
 	name, path, slash := f.Name(), f.Root(), "/"
 	if name == "local" {
 		slash = string(os.PathSeparator)
@@ -41,57 +38,5 @@ var nonCanonicalChars = regexp.MustCompile(`[\s\\/:?*]`)

 // SessionName makes a unique base name for the sync operation
 func SessionName(fs1, fs2 fs.Fs) string {
-	return StripHexString(CanonicalPath(FsPath(fs1))) + ".." + StripHexString(CanonicalPath(FsPath(fs2)))
-}
-
-// StripHexString strips the (first) canonical {hexstring} suffix
-func StripHexString(path string) string {
-	open := strings.IndexRune(path, '{')
-	close := strings.IndexRune(path, '}')
-	if open >= 0 && close > open {
-		return path[:open] + path[close+1:] // (trailing underscore)
-	}
-	return path
-}
-
-// HasHexString returns true if path contains at least one canonical {hexstring} suffix
-func HasHexString(path string) bool {
-	open := strings.IndexRune(path, '{')
-	if open >= 0 && strings.IndexRune(path, '}') > open {
-		return true
-	}
-	return false
-}
-
-// BasePath joins the workDir with the SessionName, stripping {hexstring} suffix if necessary
-func BasePath(ctx context.Context, workDir string, fs1, fs2 fs.Fs) string {
-	suffixedSession := CanonicalPath(FsPath(fs1)) + ".." + CanonicalPath(FsPath(fs2))
-	suffixedBasePath := filepath.Join(workDir, suffixedSession)
-	listing1 := suffixedBasePath + ".path1.lst"
-	listing2 := suffixedBasePath + ".path2.lst"
-
-	sessionName := SessionName(fs1, fs2)
-	basePath := filepath.Join(workDir, sessionName)
-
-	// Normalize to non-canonical version for overridden configs
-	// to ensure that backend-specific flags don't change the listing filename.
-	// For backward-compatibility, we first check if we found a listing file with the suffixed version.
-	// If so, we rename it (and overwrite non-suffixed version, if any.)
-	// If not, we carry on with the non-suffixed version.
-	// We should only find a suffixed version if bisync v1.66 or older created it.
-	if HasHexString(suffixedSession) && FileExists(listing1) {
-		fs.Infof(listing1, "renaming to: %s", basePath+".path1.lst")
-		if !operations.SkipDestructive(ctx, listing1, "rename to "+basePath+".path1.lst") {
-			_ = os.Rename(listing1, basePath+".path1.lst")
-		}
-	}
-	if HasHexString(suffixedSession) && FileExists(listing2) {
-		fs.Infof(listing2, "renaming to: %s", basePath+".path2.lst")
-		if !operations.SkipDestructive(ctx, listing1, "rename to "+basePath+".path2.lst") {
-			_ = os.Rename(listing2, basePath+".path2.lst")
-		} else {
-			return suffixedBasePath
-		}
-	}
-	return basePath
+	return CanonicalPath(FsPath(fs1)) + ".." + CanonicalPath(FsPath(fs2))
 }
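For context on the removed helpers: rclone appears to append a `{hexstring}` suffix to a remote's canonical name when its config is overridden on the command line, and `StripHexString` drops that suffix so bisync listing filenames stay stable across runs. A quick demonstration of the brace-scanning logic, reimplemented standalone:

```go
package main

import (
	"fmt"
	"strings"
)

// stripHexString mirrors the removed helper: drop the first
// {...} span, keeping everything around it.
func stripHexString(path string) string {
	open := strings.IndexRune(path, '{')
	close := strings.IndexRune(path, '}')
	if open >= 0 && close > open {
		return path[:open] + path[close+1:]
	}
	return path
}

func main() {
	fmt.Println(stripHexString("remote{a1b2c}_path1")) // remote_path1
	fmt.Println(stripHexString("plain_remote"))        // unchanged
}
```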
@@ -39,7 +39,7 @@ func FileExists(file string) bool {
 	return !os.IsNotExist(err)
 }

-// CopyFileIfExists is like CopyFile but does not fail if source does not exist
+// CopyFileIfExists is like CopyFile but does to fail if source does not exist
 func CopyFileIfExists(srcFile, dstFile string) error {
 	if !FileExists(srcFile) {
 		return nil
@@ -5,8 +5,6 @@ import (
 	"os"
 	"sort"
-	"strconv"
 	"strings"
-	"time"
 )

 // Names comprises a set of file names
@@ -61,105 +59,3 @@ func SaveList(list []string, path string) error {
 	}
 	return os.WriteFile(path, buf.Bytes(), PermSecure)
 }
-
-// AliasMap comprises a pair of names that are not equal but treated as equal for comparison purposes
-// For example, when normalizing unicode and casing
-// This helps reduce repeated normalization functions, which really slow things down
-type AliasMap map[string]string
-
-// Add adds new pair to the set, in both directions
-func (am AliasMap) Add(name1, name2 string) {
-	if name1 != name2 {
-		am[name1] = name2
-		am[name2] = name1
-	}
-}
-
-// Alias returns the alternate version, if any, else the original.
-func (am AliasMap) Alias(name1 string) string {
-	// note: we don't need to check normalization settings, because we already did it in March.
-	// the AliasMap will only exist if March paired up two unequal filenames.
-	name2, ok := am[name1]
-	if ok {
-		return name2
-	}
-	return name1
-}
-
-// ParseGlobs determines whether a string contains {brackets}
-// and returns the substring (including both brackets) for replacing
-// substring is first opening bracket to last closing bracket --
-// good for {{this}} but not {this}{this}
-func ParseGlobs(s string) (hasGlobs bool, substring string) {
-	open := strings.Index(s, "{")
-	close := strings.LastIndex(s, "}")
-	if open >= 0 && close > open {
-		return true, s[open : close+1]
-	}
-	return false, ""
-}
-
-// TrimBrackets converts {{this}} to this
-func TrimBrackets(s string) string {
-	return strings.Trim(s, "{}")
-}
-
-// TimeFormat converts a user-supplied string to a Go time constant, if possible
-func TimeFormat(timeFormat string) string {
-	switch timeFormat {
-	case "Layout":
-		timeFormat = time.Layout
-	case "ANSIC":
-		timeFormat = time.ANSIC
-	case "UnixDate":
-		timeFormat = time.UnixDate
-	case "RubyDate":
-		timeFormat = time.RubyDate
-	case "RFC822":
-		timeFormat = time.RFC822
-	case "RFC822Z":
-		timeFormat = time.RFC822Z
-	case "RFC850":
-		timeFormat = time.RFC850
-	case "RFC1123":
-		timeFormat = time.RFC1123
-	case "RFC1123Z":
-		timeFormat = time.RFC1123Z
-	case "RFC3339":
-		timeFormat = time.RFC3339
-	case "RFC3339Nano":
-		timeFormat = time.RFC3339Nano
-	case "Kitchen":
-		timeFormat = time.Kitchen
-	case "Stamp":
-		timeFormat = time.Stamp
-	case "StampMilli":
-		timeFormat = time.StampMilli
-	case "StampMicro":
-		timeFormat = time.StampMicro
-	case "StampNano":
-		timeFormat = time.StampNano
-	case "DateTime":
-		// timeFormat = time.DateTime // missing in go1.19
-		timeFormat = "2006-01-02 15:04:05"
-	case "DateOnly":
-		// timeFormat = time.DateOnly // missing in go1.19
-		timeFormat = "2006-01-02"
-	case "TimeOnly":
-		// timeFormat = time.TimeOnly // missing in go1.19
-		timeFormat = "15:04:05"
-	case "MacFriendlyTime", "macfriendlytime", "mac":
-		timeFormat = "2006-01-02 0304PM" // not actually a Go constant -- but useful as macOS filenames can't have colons
-	}
-	return timeFormat
-}
-
-// AppyTimeGlobs converts "myfile-{DateOnly}.txt" to "myfile-2006-01-02.txt"
-func AppyTimeGlobs(s string, t time.Time) string {
-	hasGlobs, substring := ParseGlobs(s)
-	if !hasGlobs {
-		return s
-	}
-	timeString := t.Local().Format(TimeFormat(TrimBrackets(substring)))
-	return strings.ReplaceAll(s, substring, timeString)
-}
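A usage sketch for the removed time-glob helpers: a filename template like `myfile-{DateOnly}.txt` has its `{...}` span replaced by the given time formatted with the named Go layout. Reimplemented minimally below with a single hard-coded layout standing in for the full `TimeFormat` lookup table:

```go
package main

import (
	"fmt"
	"strings"
	"time"
)

func applyTimeGlob(s string, t time.Time) string {
	open, close := strings.Index(s, "{"), strings.LastIndex(s, "}")
	if open < 0 || close <= open {
		return s // no {glob} present
	}
	layout := "2006-01-02" // stands in for the "DateOnly" lookup
	return strings.ReplaceAll(s, s[open:close+1], t.Format(layout))
}

func main() {
	t := time.Date(2024, time.February, 1, 0, 0, 0, 0, time.UTC)
	fmt.Println(applyTimeGlob("myfile-{DateOnly}.txt", t)) // myfile-2024-02-01.txt
}
```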
|
@ -21,7 +21,6 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/rclone/rclone/cmd/bisync"
|
||||
"github.com/rclone/rclone/cmd/bisync/bilib"
|
||||
|
@ -30,16 +29,12 @@ import (
|
|||
"github.com/rclone/rclone/fs/cache"
|
||||
"github.com/rclone/rclone/fs/filter"
|
||||
"github.com/rclone/rclone/fs/fspath"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/object"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/fs/sync"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/lib/atexit"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
"github.com/rclone/rclone/lib/terminal"
|
||||
"golang.org/x/text/unicode/norm"
|
||||
|
||||
"github.com/pmezard/go-difflib/difflib"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
@ -76,16 +71,6 @@ var logReplacements = []string{
|
|||
`^NOTICE: too_many_(requests|write_operations)/\.*: Too many requests or write operations.*$`, dropMe,
|
||||
`^NOTICE: Dropbox root .*?: Forced to upload files to set modification times on this backend.$`, dropMe,
|
||||
`^INFO : .*?: src and dst identical but can't set mod time without deleting and re-uploading$`, dropMe,
|
||||
// ignore crypt info messages
|
||||
`^INFO : .*?: Crypt detected! Using cryptcheck instead of check. \(Use --size-only or --ignore-checksum to disable\)$`, dropMe,
|
||||
// ignore drive info messages
|
||||
`^NOTICE:.*?Files of unknown size \(such as Google Docs\) do not sync reliably with --checksum or --size-only\. Consider using modtime instead \(the default\) or --drive-skip-gdocs.*?$`, dropMe,
|
||||
// ignore differences in backend features
|
||||
`^.*?"HashType1":.*?$`, dropMe,
|
||||
`^.*?"HashType2":.*?$`, dropMe,
|
||||
`^.*?"SlowHashDetected":.*?$`, dropMe,
|
||||
`^.*? for same-side diffs on .*?$`, dropMe,
|
||||
`^.*?Downloading hashes.*?$`, dropMe,
|
||||
}
|
||||
|
||||
// Some dry-run messages differ depending on the particular remote.
|
||||
|
@ -111,23 +96,17 @@ var logHoppers = []string{
    // subdirectories. The order inconsistency initially showed up in the
    // listings and triggered reordering of log messages, but the actual
    // files will in fact match.
    `.* +.....Access test failed: Path[12] file not found in Path[12].*`,
    `ERROR : - +Access test failed: Path[12] file not found in Path[12] - .*`,

    // Test case `resync` suffered from the order of queued copies.
    `(?:INFO |NOTICE): - Path2 Resync will copy to Path1 +- .*`,

    // Test case `normalization` can have random order of fix-case files.
    `(?:INFO |NOTICE): .*: Fixed case by renaming to: .*`,

    // order of files re-checked prior to a conflict rename
    `ERROR : .*: md5 differ.*`,
}

// Some log lines can contain Windows path separator that must be
// converted to "/" in every matching token to match golden logs.
var logLinesWithSlash = []string{
    `.*\(\d\d\) :.*(fix-names|touch-glob|touch-copy|copy-file|copy-as|copy-dir|delete-file) `,
    `INFO : - .*Path[12].* +.*Queue copy to.* Path[12].*`,
    `\(\d\d\) : (touch-glob|touch-copy|copy-file|copy-as|copy-dir|delete-file) `,
    `INFO : - Path[12] +Queue copy to Path[12] `,
    `INFO : Synching Path1 .*? with Path2 `,
    `INFO : Validating listings for `,
}
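Both tables are flat lists of (regexp, replacement) pairs: a line whose whole-line pattern maps to dropMe is deleted before golden comparison, and anything else is rewritten in place. A minimal sketch of that consumption loop, assuming dropMe is a sentinel string as in the harness:

```go
package main

import (
    "fmt"
    "regexp"
)

const dropMe = "DROP" // sentinel value; the real harness defines its own

var logReplacements = []string{
    `^NOTICE: too_many_requests/.*$`, dropMe,
    `\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d`, "{TIMESTAMP}",
}

func mangleLog(lines []string) (out []string) {
    for _, line := range lines {
        for i := 0; i < len(logReplacements); i += 2 {
            line = regexp.MustCompile(logReplacements[i]).ReplaceAllString(line, logReplacements[i+1])
        }
        if line != dropMe { // whole-line matches collapse to the sentinel and are dropped
            out = append(out, line)
        }
    }
    return out
}

func main() {
    fmt.Println(mangleLog([]string{"NOTICE: too_many_requests/x", "INFO : 2024-01-02T03:04:05 copied"}))
}
```
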
@ -186,11 +165,8 @@ type bisyncTest struct {
    golden bool
    debug  bool
    stopAt int
    TestFn bisync.TestFunc
}

var color = bisync.Color

// TestBisync is a test engine for bisync test cases.
func TestBisync(t *testing.T) {
    ctx := context.Background()
@ -204,8 +180,6 @@ func TestBisync(t *testing.T) {
    if *argRefreshTimes {
        ci.RefreshTimes = true
    }
    bisync.Colors = true
    time.Local, _ = time.LoadLocation("America/New_York")

    baseDir, err := os.Getwd()
    require.NoError(t, err, "get current directory")
@ -260,10 +234,6 @@ func TestBisync(t *testing.T) {
    testList = nil
    for _, testCase := range b.listDir(b.dataRoot) {
        if strings.HasPrefix(testCase, "test_") {
            // if dir is empty, skip it (can happen due to gitignored files/dirs when checking out branch)
            if len(b.listDir(filepath.Join(b.dataRoot, testCase))) == 0 {
                continue
            }
            testList = append(testList, testCase)
        }
    }
@ -307,10 +277,6 @@ func (b *bisyncTest) runTestCase(ctx context.Context, t *testing.T, testCase str
    b.goldenDir = b.ensureDir(b.testDir, "golden", false)
    b.dataDir = b.ensureDir(b.testDir, "modfiles", true) // optional

    // normalize unicode so tests are runnable on macOS
    b.sessionName = norm.NFC.String(b.sessionName)
    b.goldenDir = norm.NFC.String(b.goldenDir)

    // For test stability, jam initial dates to a fixed past date.
    // Test cases that change files will touch specific files to fixed new dates.
    initDate := time.Date(2000, time.January, 1, 0, 0, 0, 0, bisync.TZ)
@ -407,16 +373,16 @@ func (b *bisyncTest) runTestCase(ctx context.Context, t *testing.T, testCase str
    var passed bool
    switch errorCount {
    case 0:
        msg = color(terminal.GreenFg, fmt.Sprintf("TEST %s PASSED", b.testCase))
        msg = fmt.Sprintf("TEST %s PASSED", b.testCase)
        passed = true
    case -2:
        msg = color(terminal.YellowFg, fmt.Sprintf("TEST %s SKIPPED", b.testCase))
        msg = fmt.Sprintf("TEST %s SKIPPED", b.testCase)
        passed = true
    case -1:
        msg = color(terminal.RedFg, fmt.Sprintf("TEST %s FAILED - WRONG NUMBER OF FILES", b.testCase))
        msg = fmt.Sprintf("TEST %s FAILED - WRONG NUMBER OF FILES", b.testCase)
        passed = false
    default:
        msg = color(terminal.RedFg, fmt.Sprintf("TEST %s FAILED - %d MISCOMPARED FILES", b.testCase, errorCount))
        msg = fmt.Sprintf("TEST %s FAILED - %d MISCOMPARED FILES", b.testCase, errorCount)
        buckets := b.fs1.Features().BucketBased || b.fs2.Features().BucketBased
        passed = false
        if b.testCase == "rmdirs" && buckets {
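The color(...) wrapper being stripped on the branch side is just conditional ANSI decoration. A plausible sketch of the mechanism (the escape codes are standard ANSI SGR; the gating variable mirrors the bisync.Colors switch set above):

```go
package main

import "fmt"

var colorsEnabled = true // mirrors the bisync.Colors switch set by the test engine

// colorize wraps s in an ANSI SGR escape; "\x1b[0m" resets attributes.
func colorize(code int, s string) string {
    if !colorsEnabled {
        return s
    }
    return fmt.Sprintf("\x1b[%dm%s\x1b[0m", code, s)
}

func main() {
    fmt.Println(colorize(32, "TEST basic PASSED")) // 32 = green foreground
    fmt.Println(colorize(31, "TEST basic FAILED")) // 31 = red foreground
}
```
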
@ -483,7 +449,7 @@ func (b *bisyncTest) cleanupCase(ctx context.Context) {
func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
    var fsrc, fdst fs.Fs
    accounting.Stats(ctx).ResetErrors()
    b.logPrintf("%s %s", color(terminal.CyanFg, b.stepStr), color(terminal.BlueFg, line))
    b.logPrintf("%s %s", b.stepStr, line)

    ci := fs.GetConfig(ctx)
    ciSave := *ci
@ -495,23 +461,6 @@ func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
        ci.LogLevel = fs.LogLevelDebug
    }

    testFunc := func() {
        src := filepath.Join(b.dataDir, "file7.txt")

        for i := 0; i < 50; i++ {
            dst := "file" + fmt.Sprint(i) + ".txt"
            err := b.copyFile(ctx, src, b.path2, dst)
            if err != nil {
                fs.Errorf(src, "error copying file: %v", err)
            }
            dst = "file" + fmt.Sprint(100-i) + ".txt"
            err = b.copyFile(ctx, src, b.path1, dst)
            if err != nil {
                fs.Errorf(dst, "error copying file: %v", err)
            }
        }
    }

    args := splitLine(line)
    switch args[0] {
    case "test":
@ -528,12 +477,7 @@ func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
        if fsrc, err = fs.NewFs(ctx, args[1]); err != nil {
            return err
        }
        err = purgeChildren(ctx, fsrc, "")
        if err != nil {
            return err
        }
        flushCache(fsrc)
        return
        return purgeChildren(ctx, fsrc, "")
    case "delete-file":
        b.checkArgs(args, 1, 1)
        dir, file := filepath.Split(args[1])
@ -576,16 +520,6 @@ func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
    case "copy-as":
        b.checkArgs(args, 3, 3)
        return b.copyFile(ctx, args[1], args[2], args[3])
    case "copy-as-NFC":
        b.checkArgs(args, 3, 3)
        ci.NoUnicodeNormalization = true
        ci.FixCase = true
        return b.copyFile(ctx, args[1], norm.NFC.String(args[2]), norm.NFC.String(args[3]))
    case "copy-as-NFD":
        b.checkArgs(args, 3, 3)
        ci.NoUnicodeNormalization = true
        ci.FixCase = true
        return b.copyFile(ctx, args[1], norm.NFD.String(args[2]), norm.NFD.String(args[3]))
    case "copy-dir", "sync-dir":
        b.checkArgs(args, 2, 2)
        if fsrc, err = cache.Get(ctx, args[1]); err != nil {
@ -603,131 +537,9 @@ func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
        return err
    case "list-dirs":
        b.checkArgs(args, 1, 1)
        return b.listSubdirs(ctx, args[1], true)
    case "list-files":
        b.checkArgs(args, 1, 1)
        return b.listSubdirs(ctx, args[1], false)
        return b.listSubdirs(ctx, args[1])
    case "bisync":
        ci.NoUnicodeNormalization = false
        ci.IgnoreCaseSync = false
        // ci.FixCase = true
        return b.runBisync(ctx, args[1:])
    case "test-func":
        b.TestFn = testFunc
        return
    case "fix-names":
        // in case the local os converted any filenames
        ci.NoUnicodeNormalization = true
        ci.FixCase = true
        ci.IgnoreTimes = true
        reset := func() {
            ci.NoUnicodeNormalization = false
            ci.FixCase = false
            ci.IgnoreTimes = false
        }
        defer reset()
        b.checkArgs(args, 1, 1)
        var ok bool
        var remoteName string
        var remotePath string
        remoteName, remotePath, err = fspath.SplitFs(args[1])
        if err != nil {
            return err
        }
        if remoteName == "" {
            remoteName = "/"
        }

        fsrc, err = fs.NewFs(ctx, remoteName)
        if err != nil {
            return err
        }

        // DEBUG
        fs.Debugf(remotePath, "is NFC: %v", norm.NFC.IsNormalString(remotePath))
        fs.Debugf(remotePath, "is NFD: %v", norm.NFD.IsNormalString(remotePath))
        fs.Debugf(remotePath, "is valid UTF8: %v", utf8.ValidString(remotePath))

        // check if it's a dir, try moving it
        var leaf string
        _, leaf, err = fspath.Split(remotePath)
        if err == nil && leaf == "" {
            remotePath = args[1]
            fs.Debugf(remotePath, "attempting to fix directory")

            fixDirname := func(old, new string) {
                if new != old {
                    oldName, err := fs.NewFs(ctx, old)
                    if err != nil {
                        fs.Logf(old, "error getting Fs: %v", err)
                    }
                    fs.Debugf(nil, "Attempting to move %s to %s", oldName.Root(), new)
                    // Create random name to temporarily move dir to
                    tmpDirName := strings.TrimSuffix(new, slash) + "-rclone-move-" + random.String(8)
                    var tmpDirFs fs.Fs
                    tmpDirFs, _ = fs.NewFs(ctx, tmpDirName)
                    err = sync.MoveDir(ctx, tmpDirFs, oldName, true, true)
                    if err != nil {
                        fs.Debugf(oldName, "error attempting to move folder: %v", err)
                    }
                    // now move the temp dir to real name
                    fsrc, _ = fs.NewFs(ctx, new)
                    err = sync.MoveDir(ctx, fsrc, tmpDirFs, true, true)
                    if err != nil {
                        fs.Debugf(tmpDirFs, "error attempting to move folder to %s: %v", fsrc.Root(), err)
                    }
                } else {
                    fs.Debugf(nil, "old and new are equal. Skipping. %s (%s) %s (%s)", old, stringToHash(old), new, stringToHash(new))
                }
            }

            if norm.NFC.String(remotePath) != remotePath && norm.NFD.String(remotePath) != remotePath {
                fs.Debugf(remotePath, "This is neither fully NFD or NFC -- can't fix reliably!")
            }
            fixDirname(norm.NFC.String(remotePath), remotePath)
            fixDirname(norm.NFD.String(remotePath), remotePath)
            return
        }

        // if it's a file
        fs.Debugf(remotePath, "attempting to fix file -- filename hash: %s", stringToHash(leaf))
        fixFilename := func(old, new string) {
            ok, err := fs.FileExists(ctx, fsrc, old)
            if err != nil {
                fs.Debugf(remotePath, "error checking if file exists: %v", err)
            }
            fs.Debugf(old, "file exists: %v %s", ok, stringToHash(old))
            fs.Debugf(nil, "FILE old: %s new: %s equal: %v", old, new, old == new)
            fs.Debugf(nil, "HASH old: %s new: %s equal: %v", stringToHash(old), stringToHash(new), stringToHash(old) == stringToHash(new))
            if ok && new != old {
                fs.Debugf(new, "attempting to rename %s to %s", old, new)
                err = operations.MoveFile(ctx, fsrc, fsrc, new, old)
                if err != nil {
                    fs.Errorf(new, "error trying to rename %s to %s - %v", old, new, err)
                }
            }
        }

        // look for NFC version
        fixFilename(norm.NFC.String(remotePath), remotePath)
        // if it's in a subdir we just moved, the file and directory might have different encodings. Check for that.
        mixed := strings.TrimSuffix(norm.NFD.String(remotePath), norm.NFD.String(leaf)) + norm.NFC.String(leaf)
        fixFilename(mixed, remotePath)
        // Try NFD
        fixFilename(norm.NFD.String(remotePath), remotePath)
        // Try mixed in reverse
        mixed = strings.TrimSuffix(norm.NFC.String(remotePath), norm.NFC.String(leaf)) + norm.NFD.String(leaf)
        fixFilename(mixed, remotePath)
        // check if it's right now, error if not
        ok, err = fs.FileExists(ctx, fsrc, remotePath)
        if !ok || err != nil {
            fs.Logf(remotePath, "Can't find expected file %s (was it renamed by the os?) %v", args[1], err)
            return
        } else {
            // include hash of filename to make unicode form differences easier to see in logs
            fs.Debugf(remotePath, "verified file exists at correct path. filename hash: %s", stringToHash(leaf))
        }
        return
    default:
        return fmt.Errorf("unknown command: %q", args[0])
    }
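The whole fix-names dance exists because one visible filename can be two different byte sequences. A short self-contained demonstration of the NFC/NFD checks used above:

```go
package main

import (
    "fmt"

    "golang.org/x/text/unicode/norm"
)

func main() {
    name := "re\u0301sume\u0301.txt" // "résumé.txt" with combining accents (NFD)
    fmt.Println(norm.NFC.IsNormalString(name)) // false
    fmt.Println(norm.NFD.IsNormalString(name)) // true
    nfc := norm.NFC.String(name)
    // same visible name, different byte lengths -- why remotes can disagree
    fmt.Println(len(name), len(nfc)) // 14 12
}
```
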
@ -769,13 +581,6 @@ func (b *bisyncTest) checkArgs(args []string, min, max int) {
    }
}

func flushCache(f fs.Fs) {
    dirCacheFlush := f.Features().DirCacheFlush
    if dirCacheFlush == nil {
        fs.Errorf(nil, "%v: can't flush dir cache", f)
    }
}

func (b *bisyncTest) runBisync(ctx context.Context, args []string) (err error) {
    opt := &bisync.Options{
        Workdir: b.workDir,
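flushCache above relies on DirCacheFlush being an optional feature: the function pointer is nil on backends without a directory cache. A hedged sketch of the usual probe-then-call pattern (the invocation branch is an assumption; the hunk only shows the nil check):

```go
package bisync_test

import "github.com/rclone/rclone/fs"

// flushIfPossible forgets f's cached listings so the next read hits the backend.
func flushIfPossible(f fs.Fs) {
    if do := f.Features().DirCacheFlush; do != nil {
        do()
    }
}
```
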
@ -784,15 +589,10 @@ func (b *bisyncTest) runBisync(ctx context.Context, args []string) (err error) {
        MaxDelete:     bisync.DefaultMaxDelete,
        CheckFilename: bisync.DefaultCheckFilename,
        CheckSync:     bisync.CheckSyncTrue,
        TestFn:        b.TestFn,
    }
    octx, ci := fs.AddConfig(ctx)
    fs1, fs2 := b.fs1, b.fs2

    // flush cache
    flushCache(fs1)
    flushCache(fs2)

    addSubdir := func(path, subdir string) fs.Fs {
        remote := path + subdir
        f, err := fs.NewFs(ctx, remote)
@ -833,41 +633,9 @@ func (b *bisyncTest) runBisync(ctx context.Context, args []string) (err error) {
            require.NoError(b.t, err, "parsing max-delete=%q", val)
        case "size-only":
            ci.SizeOnly = true
        case "ignore-size":
            ci.IgnoreSize = true
        case "checksum":
            ci.CheckSum = true
            opt.Compare.DownloadHash = true // allows us to test crypt and the like
        case "compare-all":
            opt.CompareFlag = "size,modtime,checksum"
            opt.Compare.DownloadHash = true // allows us to test crypt and the like
        case "subdir":
            fs1 = addSubdir(b.path1, val)
            fs2 = addSubdir(b.path2, val)
        case "backupdir1":
            opt.BackupDir1 = val
        case "backupdir2":
            opt.BackupDir2 = val
        case "ignore-listing-checksum":
            opt.IgnoreListingChecksum = true
        case "no-norm":
            ci.NoUnicodeNormalization = true
            ci.IgnoreCaseSync = false
        case "norm":
            ci.NoUnicodeNormalization = false
            ci.IgnoreCaseSync = true
        case "fix-case":
            ci.NoUnicodeNormalization = false
            ci.IgnoreCaseSync = true
            ci.FixCase = true
        case "conflict-resolve":
            _ = opt.ConflictResolve.Set(val)
        case "conflict-loser":
            _ = opt.ConflictLoser.Set(val)
        case "conflict-suffix":
            opt.ConflictSuffixFlag = val
        case "resync-mode":
            _ = opt.ResyncMode.Set(val)
        default:
            return fmt.Errorf("invalid bisync option %q", arg)
        }
@ -921,7 +689,7 @@ func (b *bisyncTest) copyFile(ctx context.Context, src, dst, asName string) (err
    var fsrc, fdst fs.Fs
    var srcPath, srcFile, dstPath, dstFile string

    switch fsrc, err = fs.NewFs(ctx, src); err {
    switch fsrc, err = cache.Get(ctx, src); err {
    case fs.ErrorIsFile:
        // ok
    case nil:
@ -944,7 +712,7 @@ func (b *bisyncTest) copyFile(ctx context.Context, src, dst, asName string) (err
    if dstFile != "" {
        dstPath = dst // force directory
    }
    if fdst, err = fs.NewFs(ctx, dstPath); err != nil {
    if fdst, err = cache.Get(ctx, dstPath); err != nil {
        return err
    }

@ -961,27 +729,23 @@ func (b *bisyncTest) copyFile(ctx context.Context, src, dst, asName string) (err
    return operations.CopyFile(fctx, fdst, fsrc, dstFile, srcFile)
}

// listSubdirs is equivalent to `rclone lsf -R [--dirs-only]`
func (b *bisyncTest) listSubdirs(ctx context.Context, remote string, DirsOnly bool) error {
// listSubdirs is equivalent to `rclone lsf -R --dirs-only`
func (b *bisyncTest) listSubdirs(ctx context.Context, remote string) error {
    f, err := fs.NewFs(ctx, remote)
    if err != nil {
        return err
    }

    // flush cache
    flushCache(f)

    opt := operations.ListJSONOpt{
        NoModTime:  true,
        NoMimeType: true,
        DirsOnly:   DirsOnly,
        DirsOnly:   true,
        Recurse:    true,
    }
    fmt := operations.ListFormat{}
    fmt.SetDirSlash(true)
    fmt.AddPath()
    printItem := func(item *operations.ListJSONItem) error {
        b.logPrintf("%s - filename hash: %s", fmt.Format(item), stringToHash(item.Name))
        b.logPrintf("%s", fmt.Format(item))
        return nil
    }
    return operations.ListJSON(ctx, f, "", &opt, printItem)
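operations.ListJSON is the same traversal that powers `rclone lsf`; a standalone sketch of the `lsf -R --dirs-only` equivalent, assuming a path the local backend can open:

```go
package main

import (
    "context"
    "fmt"
    "log"

    _ "github.com/rclone/rclone/backend/local" // register the local backend
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/operations"
)

func main() {
    ctx := context.Background()
    f, err := fs.NewFs(ctx, "/tmp") // assumption: any readable directory works
    if err != nil {
        log.Fatal(err)
    }
    opt := operations.ListJSONOpt{NoModTime: true, NoMimeType: true, DirsOnly: true, Recurse: true}
    format := operations.ListFormat{}
    format.SetDirSlash(true) // print directories with a trailing "/"
    format.AddPath()
    err = operations.ListJSON(ctx, f, "", &opt, func(item *operations.ListJSONItem) error {
        fmt.Println(format.Format(item))
        return nil
    })
    if err != nil {
        log.Fatal(err)
    }
}
```
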
@ -1109,7 +873,7 @@ func (b *bisyncTest) compareResults() int {

    if goldenNum != resultNum {
        log.Print(divider)
        log.Print(color(terminal.RedFg, "MISCOMPARE - Number of Golden and Results files do not match:"))
        log.Printf("MISCOMPARE - Number of Golden and Results files do not match:")
        log.Printf("  Golden count: %d", goldenNum)
        log.Printf("  Result count: %d", resultNum)
        log.Printf("  Golden files: %s", strings.Join(goldenFiles, ", "))
@ -1145,7 +909,7 @@ func (b *bisyncTest) compareResults() int {
        require.NoError(b.t, os.WriteFile(resultFile, []byte(resultText), bilib.PermSecure))
    }

    if goldenText == resultText || strings.Contains(resultText, ".DS_Store") {
    if goldenText == resultText {
        continue
    }
    errorCount++
@ -1159,7 +923,7 @@ func (b *bisyncTest) compareResults() int {
    require.NoError(b.t, err, "diff failed")

    log.Print(divider)
    log.Printf(color(terminal.RedFg, "| MISCOMPARE -Golden vs +Results for %s"), file)
    log.Printf("| MISCOMPARE -Golden vs +Results for %s", file)
    for _, line := range strings.Split(strings.TrimSpace(text), "\n") {
        log.Printf("| %s", strings.TrimSpace(line))
    }
@ -1187,10 +951,6 @@ func (b *bisyncTest) storeGolden() {
        if fileType(fileName) == "lock" {
            continue
        }
        if fileName == "backupdirs" {
            log.Printf("skipping: %v", fileName)
            continue
        }
        goldName := b.toGolden(fileName)
        if goldName != fileName {
            targetPath := filepath.Join(b.workDir, goldName)
@ -1212,10 +972,6 @@ func (b *bisyncTest) storeGolden() {
        if fileType(fileName) == "lock" {
            continue
        }
        if fileName == "backupdirs" {
            log.Printf("skipping: %v", fileName)
            continue
        }
        text := b.mangleResult(b.goldenDir, fileName, true)

        goldName := b.toGolden(fileName)
@ -1232,27 +988,17 @@ func (b *bisyncTest) storeGolden() {

// mangleResult prepares test logs or listings for comparison
func (b *bisyncTest) mangleResult(dir, file string, golden bool) string {
    if file == "backupdirs" {
        return "skipping backupdirs"
    }
    buf, err := os.ReadFile(filepath.Join(dir, file))
    require.NoError(b.t, err)

    // normalize unicode so tests are runnable on macOS
    buf = norm.NFC.Bytes(buf)

    text := string(buf)

    switch fileType(strings.TrimSuffix(file, ".sav")) {
    case "queue":
        lines := strings.Split(text, eol)
        sort.Strings(lines)
        for i, line := range lines {
            lines[i] = normalizeEncoding(line)
        }
        return joinLines(lines)
    case "listing":
        return b.mangleListing(text, golden, file)
        return mangleListing(text, golden)
    case "log":
        // fall thru
    default:
@ -1353,7 +1090,7 @@ func (b *bisyncTest) mangleResult(dir, file string, golden bool) string {
}

// mangleListing sorts listing lines before comparing.
func (b *bisyncTest) mangleListing(text string, golden bool, file string) string {
func mangleListing(text string, golden bool) string {
    lines := strings.Split(text, eol)

    hasHeader := len(lines) > 0 && strings.HasPrefix(lines[0], bisync.ListingHeader)
@ -1377,43 +1114,12 @@ func (b *bisyncTest) mangleListing(text string, golden bool, file string) string
        return getFile(lines[i]) < getFile(lines[j])
    })

    // parse whether this is Path1 or Path2 (so we can apply per-Fs precision/hash settings)
    isPath1 := strings.Contains(file, ".path1.lst")
    f := b.fs2
    if isPath1 {
        f = b.fs1
    }

    // account for differences in backend features when comparing
    // Store hash as golden but ignore when comparing.
    if !golden {
        for i, s := range lines {
            // Store hash as golden but ignore when comparing (only if no md5 support).
            match := regex.FindStringSubmatch(strings.TrimSpace(s))
            if match != nil && match[2] != "-" && (!b.fs1.Hashes().Contains(hash.MD5) || !b.fs2.Hashes().Contains(hash.MD5)) { // if hash is not empty and either side lacks md5
                lines[i] = match[1] + "-" + match[3] + match[4] // replace it with "-" for comparison purposes (see #5679)
            }
            // account for modtime precision
            var lineRegex = regexp.MustCompile(`^(\S) +(-?\d+) (\S+) (\S+) (\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d\.\d{9}[+-]\d{4}) (".+")$`)
            const timeFormat = "2006-01-02T15:04:05.000000000-0700"
            const lineFormat = "%s %8d %s %s %s %q\n"
            var TZ = time.UTC
            fields := lineRegex.FindStringSubmatch(strings.TrimSuffix(lines[i], "\n"))
            if fields != nil {
                sizeVal, sizeErr := strconv.ParseInt(fields[2], 10, 64)
                if sizeErr == nil {
                    // account for filename encoding differences by normalizing to OS encoding
                    fields[6] = normalizeEncoding(fields[6])
                    timeStr := fields[5]
                    if f.Precision() == fs.ModTimeNotSupported {
                        lines[i] = fmt.Sprintf(lineFormat, fields[1], sizeVal, fields[3], fields[4], "-", fields[6])
                        continue
                    }
                    timeVal, timeErr := time.ParseInLocation(timeFormat, timeStr, TZ)
                    if timeErr == nil {
                        timeRound := timeVal.Round(f.Precision() * 2)
                        lines[i] = fmt.Sprintf(lineFormat, fields[1], sizeVal, fields[3], fields[4], timeRound, fields[6])
                    }
                }
            if match != nil && match[2] != "-" {
                lines[i] = match[1] + "-" + match[3] + match[4]
            }
        }
    }
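Rounding both sides to twice the coarser backend's precision is what lets a 9-digit local timestamp compare equal to, say, a whole-second remote one. A small worked example of just the rounding step:

```go
package main

import (
    "fmt"
    "time"
)

func main() {
    const layout = "2006-01-02T15:04:05.000000000-0700"
    t, _ := time.Parse(layout, "2001-01-02T03:04:05.123456789+0000")
    precision := time.Second // e.g. a backend that stores whole seconds
    // doubling the precision forgives sub-unit drift between listings
    fmt.Println(t.Round(precision * 2).Format(layout)) // 2001-01-02T03:04:06.000000000+0000
}
```
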
@ -1457,15 +1163,12 @@ func (b *bisyncTest) newReplacer(mangle bool) *strings.Replacer {
        b.dataDir + slash, "{datadir/}",
        b.testDir + slash, "{testdir/}",
        b.workDir + slash, "{workdir/}",
        b.fs1.String(), "{path1String}",
        b.fs2.String(), "{path2String}",
        b.path1, "{path1/}",
        b.path2, "{path2/}",
        "//?/" + strings.TrimSuffix(strings.Replace(b.path1, slash, "/", -1), "/"), "{path1}", // fix windows-specific issue
        "//?/" + strings.TrimSuffix(strings.Replace(b.path2, slash, "/", -1), "/"), "{path2}",
        strings.TrimSuffix(b.path1, slash), "{path1}", // ensure it's still recognized without trailing slash
        strings.TrimSuffix(b.path2, slash), "{path2}",
        b.workDir, "{workdir}",
        b.sessionName, "{session}",
    }
    if fixSlash {
@ -1490,10 +1193,6 @@ func (b *bisyncTest) toGolden(name string) string {
    name = strings.ReplaceAll(name, b.canonPath1, goldenCanonBase)
    name = strings.ReplaceAll(name, b.canonPath2, goldenCanonBase)
    name = strings.TrimSuffix(name, ".sav")

    // normalize unicode so tests are runnable on macOS
    name = norm.NFC.String(name)

    return name
}

@ -1514,22 +1213,8 @@ func (b *bisyncTest) ensureDir(parent, dir string, optional bool) string {
func (b *bisyncTest) listDir(dir string) (names []string) {
    files, err := os.ReadDir(dir)
    require.NoError(b.t, err)
    ignoreIt := func(file string) bool {
        ignoreList := []string{
            // ".lst-control", ".lst-dry-control", ".lst-old", ".lst-dry-old",
            ".DS_Store"}
        for _, s := range ignoreList {
            if strings.Contains(file, s) {
                return true
            }
        }
        return false
    }
    for _, file := range files {
        if ignoreIt(file.Name()) {
            continue
        }
        names = append(names, filepath.Base(norm.NFC.String(file.Name())))
        names = append(names, filepath.Base(file.Name()))
    }
    // Sort files to ensure comparability.
    sort.Strings(names)
@ -1545,7 +1230,7 @@ func fileType(fileName string) string {
        return "log"
    }
    switch filepath.Ext(fileName) {
    case ".lst", ".lst-new", ".lst-err", ".lst-dry", ".lst-dry-new", ".lst-old", ".lst-dry-old", ".lst-control", ".lst-dry-control":
    case ".lst", ".lst-new", ".lst-err", ".lst-dry", ".lst-dry-new":
        return "listing"
    case ".que":
        return "queue"
@ -1569,36 +1254,3 @@ func (b *bisyncTest) logPrintf(text string, args ...interface{}) {
        require.NoError(b.t, err, "writing log file")
    }
}

// account for filename encoding differences between remotes by normalizing to OS encoding
func normalizeEncoding(s string) string {
    if s == "" || s == "." {
        return s
    }
    nameVal, err := strconv.Unquote(s)
    if err != nil {
        nameVal = s
    }
    nameVal = filepath.Clean(nameVal)
    nameVal = encoder.OS.FromStandardPath(nameVal)
    return strconv.Quote(encoder.OS.ToStandardPath(filepath.ToSlash(nameVal)))
}

func stringToHash(s string) string {
    ht := hash.MD5
    hasher, err := hash.NewMultiHasherTypes(hash.NewHashSet(ht))
    if err != nil {
        fs.Errorf(s, "hash unsupported: %v", err)
    }

    _, err = hasher.Write([]byte(s))
    if err != nil {
        fs.Errorf(s, "failed to write to hasher: %v", err)
    }

    sum, err := hasher.SumString(ht, false)
    if err != nil {
        fs.Errorf(s, "hasher returned an error: %v", err)
    }
    return sum
}

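stringToHash gives log readers a fingerprint that differs whenever the underlying bytes differ, even for visually identical names. The same trick with only the standard library:

```go
package main

import (
    "crypto/md5"
    "fmt"
)

func main() {
    nfc := "caf\u00e9"  // é precomposed (NFC)
    nfd := "cafe\u0301" // e + combining accent (NFD)
    fmt.Println(nfc == nfd) // false, though both render as "café"
    fmt.Printf("%x\n", md5.Sum([]byte(nfc)))
    fmt.Printf("%x\n", md5.Sum([]byte(nfd)))
}
```
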
@ -1,271 +0,0 @@
package bisync

import (
    "bytes"
    "context"
    "fmt"
    "strings"

    "github.com/rclone/rclone/backend/crypt"
    "github.com/rclone/rclone/cmd/bisync/bilib"
    "github.com/rclone/rclone/cmd/check"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/accounting"
    "github.com/rclone/rclone/fs/filter"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/fs/operations"
)

var hashType hash.Type
var fsrc, fdst fs.Fs
var fcrypt *crypt.Fs

// WhichCheck determines which CheckFn we should use based on the Fs types
// It is more robust and accurate than Check because
// it will fallback to CryptCheck or DownloadCheck instead of --size-only!
// it returns the *operations.CheckOpt with the CheckFn set.
func WhichCheck(ctx context.Context, opt *operations.CheckOpt) *operations.CheckOpt {
    ci := fs.GetConfig(ctx)
    common := opt.Fsrc.Hashes().Overlap(opt.Fdst.Hashes())

    // note that ci.IgnoreChecksum doesn't change the behavior of Check -- it's just a way to opt-out of cryptcheck/download
    if common.Count() > 0 || ci.SizeOnly || ci.IgnoreChecksum {
        // use normal check
        opt.Check = CheckFn
        return opt
    }

    FsrcCrypt, srcIsCrypt := opt.Fsrc.(*crypt.Fs)
    FdstCrypt, dstIsCrypt := opt.Fdst.(*crypt.Fs)

    if (srcIsCrypt && dstIsCrypt) || (!srcIsCrypt && dstIsCrypt) {
        // if both are crypt or only dst is crypt
        hashType = FdstCrypt.UnWrap().Hashes().GetOne()
        if hashType != hash.None {
            // use cryptcheck
            fsrc = opt.Fsrc
            fdst = opt.Fdst
            fcrypt = FdstCrypt
            fs.Infof(fdst, "Crypt detected! Using cryptcheck instead of check. (Use --size-only or --ignore-checksum to disable)")
            opt.Check = CryptCheckFn
            return opt
        }
    } else if srcIsCrypt && !dstIsCrypt {
        // if only src is crypt
        hashType = FsrcCrypt.UnWrap().Hashes().GetOne()
        if hashType != hash.None {
            // use reverse cryptcheck
            fsrc = opt.Fdst
            fdst = opt.Fsrc
            fcrypt = FsrcCrypt
            fs.Infof(fdst, "Crypt detected! Using cryptcheck instead of check. (Use --size-only or --ignore-checksum to disable)")
            opt.Check = ReverseCryptCheckFn
            return opt
        }
    }

    // if we've gotten this far, neither check nor cryptcheck will work, so use --download
    fs.Infof(fdst, "Can't compare hashes, so using check --download for safety. (Use --size-only or --ignore-checksum to disable)")
    opt.Check = DownloadCheckFn
    return opt
}

// CheckFn is a slightly modified version of Check
func CheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) {
    same, ht, err := operations.CheckHashes(ctx, src, dst)
    if err != nil {
        return true, false, err
    }
    if ht == hash.None {
        return false, true, nil
    }
    if !same {
        err = fmt.Errorf("%v differ", ht)
        fs.Errorf(src, "%v", err)
        return true, false, nil
    }
    return false, false, nil
}

// CryptCheckFn is a slightly modified version of CryptCheck
func CryptCheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) {
    cryptDst := dst.(*crypt.Object)
    underlyingDst := cryptDst.UnWrap()
    underlyingHash, err := underlyingDst.Hash(ctx, hashType)
    if err != nil {
        return true, false, fmt.Errorf("error reading hash from underlying %v: %w", underlyingDst, err)
    }
    if underlyingHash == "" {
        return false, true, nil
    }
    cryptHash, err := fcrypt.ComputeHash(ctx, cryptDst, src, hashType)
    if err != nil {
        return true, false, fmt.Errorf("error computing hash: %w", err)
    }
    if cryptHash == "" {
        return false, true, nil
    }
    if cryptHash != underlyingHash {
        err = fmt.Errorf("hashes differ (%s:%s) %q vs (%s:%s) %q", fdst.Name(), fdst.Root(), cryptHash, fsrc.Name(), fsrc.Root(), underlyingHash)
        fs.Debugf(src, err.Error())
        // using same error msg as CheckFn so integration tests match
        err = fmt.Errorf("%v differ", hashType)
        fs.Errorf(src, err.Error())
        return true, false, nil
    }
    return false, false, nil
}

// ReverseCryptCheckFn is like CryptCheckFn except src and dst are switched
// result: src is crypt, dst is non-crypt
func ReverseCryptCheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) {
    return CryptCheckFn(ctx, src, dst)
}

// DownloadCheckFn is a slightly modified version of Check with --download
func DownloadCheckFn(ctx context.Context, a, b fs.Object) (differ bool, noHash bool, err error) {
    differ, err = operations.CheckIdenticalDownload(ctx, a, b)
    if err != nil {
        return true, true, fmt.Errorf("failed to download: %w", err)
    }
    return differ, false, nil
}

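Callers are not expected to pick a CheckFn themselves; they build a CheckOpt and let WhichCheck slot in the right function, as checkconflicts does below. A condensed usage sketch using the same calls (error handling trimmed, imports as in this file):

```go
// sketch: run a check with whichever CheckFn suits the two remotes
func runCheck(ctx context.Context, f1, f2 fs.Fs) error {
    opt, close, err := check.GetCheckOpt(f1, f2)
    if err != nil {
        return err
    }
    defer close()
    opt = WhichCheck(ctx, opt) // CheckFn, CryptCheckFn, ReverseCryptCheckFn or DownloadCheckFn
    return operations.CheckFn(ctx, opt)
}
```
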
// check potential conflicts (to avoid renaming if already identical)
func (b *bisyncRun) checkconflicts(ctxCheck context.Context, filterCheck *filter.Filter, fs1, fs2 fs.Fs) (bilib.Names, error) {
    matches := bilib.Names{}
    if filterCheck.HaveFilesFrom() {
        fs.Debugf(nil, "There are potential conflicts to check.")

        opt, close, checkopterr := check.GetCheckOpt(b.fs1, b.fs2)
        if checkopterr != nil {
            b.critical = true
            b.retryable = true
            fs.Debugf(nil, "GetCheckOpt error: %v", checkopterr)
            return matches, checkopterr
        }
        defer close()

        opt.Match = new(bytes.Buffer)

        opt = WhichCheck(ctxCheck, opt)

        fs.Infof(nil, "Checking potential conflicts...")
        check := operations.CheckFn(ctxCheck, opt)
        fs.Infof(nil, "Finished checking the potential conflicts. %s", check)

        //reset error count, because we don't want to count check errors as bisync errors
        accounting.Stats(ctxCheck).ResetErrors()

        //return the list of identical files to check against later
        if len(fmt.Sprint(opt.Match)) > 0 {
            matches = bilib.ToNames(strings.Split(fmt.Sprint(opt.Match), "\n"))
        }
        if matches.NotEmpty() {
            fs.Debugf(nil, "The following potential conflicts were determined to be identical. %v", matches)
        } else {
            fs.Debugf(nil, "None of the conflicts were determined to be identical.")
        }

    }
    return matches, nil
}

// WhichEqual is similar to WhichCheck, but checks a single object.
// Returns true if the objects are equal, false if they differ or if we don't know
func WhichEqual(ctx context.Context, src, dst fs.Object, Fsrc, Fdst fs.Fs) bool {
    opt, close, checkopterr := check.GetCheckOpt(Fsrc, Fdst)
    if checkopterr != nil {
        fs.Debugf(nil, "GetCheckOpt error: %v", checkopterr)
    }
    defer close()

    opt = WhichCheck(ctx, opt)
    differ, noHash, err := opt.Check(ctx, dst, src)
    if err != nil {
        fs.Errorf(src, "failed to check: %v", err)
        return false
    }
    if noHash {
        fs.Errorf(src, "failed to check as hash is missing")
        return false
    }
    return !differ
}

// Replaces the standard Equal func with one that also considers checksum
// Note that it also updates the modtime the same way as Sync
func (b *bisyncRun) EqualFn(ctx context.Context) context.Context {
    ci := fs.GetConfig(ctx)
    ci.CheckSum = false // force checksum off so modtime is evaluated if needed
    // modtime and size settings should already be set correctly for Equal
    var equalFn operations.EqualFn = func(ctx context.Context, src fs.ObjectInfo, dst fs.Object) bool {
        fs.Debugf(src, "evaluating...")
        equal := false
        logger, _ := operations.GetLogger(ctx)
        // temporarily unset logger, we don't want Equal to duplicate it
        noop := func(ctx context.Context, sigil operations.Sigil, src, dst fs.DirEntry, err error) {
            fs.Debugf(src, "equal skipped")
        }
        ctxNoLogger := operations.WithLogger(ctx, noop)

        timeSizeEqualFn := func() (equal bool, skipHash bool) { return operations.Equal(ctxNoLogger, src, dst), false } // normally use Equal()
        if b.opt.ResyncMode == PreferOlder || b.opt.ResyncMode == PreferLarger || b.opt.ResyncMode == PreferSmaller {
            timeSizeEqualFn = func() (equal bool, skipHash bool) { return b.resyncTimeSizeEqual(ctxNoLogger, src, dst) } // but override for --resync-mode older, larger, smaller
        }
        skipHash := false // (note that we might skip it anyway based on compare/ht settings)
        equal, skipHash = timeSizeEqualFn()
        if equal && !skipHash {
            whichHashType := func(f fs.Info) hash.Type {
                ht := getHashType(f.Name())
                if ht == hash.None && b.opt.Compare.SlowHashSyncOnly && !b.opt.Resync {
                    ht = f.Hashes().GetOne()
                }
                return ht
            }
            srcHash, _ := src.Hash(ctx, whichHashType(src.Fs()))
            dstHash, _ := dst.Hash(ctx, whichHashType(dst.Fs()))
            srcHash, _ = tryDownloadHash(ctx, src, srcHash)
            dstHash, _ = tryDownloadHash(ctx, dst, dstHash)
            equal = !hashDiffers(srcHash, dstHash, whichHashType(src.Fs()), whichHashType(dst.Fs()), src.Size(), dst.Size())
        }
        if equal {
            logger(ctx, operations.Match, src, dst, nil)
            fs.Debugf(src, "EqualFn: files are equal")
            return true
        }
        logger(ctx, operations.Differ, src, dst, nil)
        fs.Debugf(src, "EqualFn: files are NOT equal")
        return false
    }
    return operations.WithEqualFn(ctx, equalFn)
}

func (b *bisyncRun) resyncTimeSizeEqual(ctxNoLogger context.Context, src fs.ObjectInfo, dst fs.Object) (equal bool, skipHash bool) {
    switch b.opt.ResyncMode {
    case PreferLarger, PreferSmaller:
        // note that arg order is path1, path2, regardless of src/dst
        path1, path2 := b.resyncWhichIsWhich(src, dst)
        if sizeDiffers(path1.Size(), path2.Size()) {
            winningPath := b.resolveLargerSmaller(path1.Size(), path2.Size(), path1.Remote(), path2.Remote(), b.opt.ResyncMode)
            // don't need to check/update modtime here, as sizes definitely differ and something will be transferred
            return b.resyncWinningPathToEqual(winningPath), b.resyncWinningPathToEqual(winningPath) // skip hash check if true
        }
        // sizes equal or don't know, so continue to checking time/hash, if applicable
        return operations.Equal(ctxNoLogger, src, dst), false // note we're back to src/dst, not path1/path2
    case PreferOlder:
        // note that arg order is path1, path2, regardless of src/dst
        path1, path2 := b.resyncWhichIsWhich(src, dst)
        if timeDiffers(ctxNoLogger, path1.ModTime(ctxNoLogger), path2.ModTime(ctxNoLogger), path1.Fs(), path2.Fs()) {
            winningPath := b.resolveNewerOlder(path1.ModTime(ctxNoLogger), path2.ModTime(ctxNoLogger), path1.Remote(), path2.Remote(), b.opt.ResyncMode)
            // if src is winner, proceed with equal to check size/hash and possibly just update dest modtime instead of transferring
            if !b.resyncWinningPathToEqual(winningPath) {
                return operations.Equal(ctxNoLogger, src, dst), false // note we're back to src/dst, not path1/path2
            }
            // if dst is winner (and definitely unequal), do not proceed further as we want dst to overwrite src regardless of size difference, and we do not want dest modtime updated
            return true, true
        }
        // times equal or don't know, so continue to checking size/hash, if applicable
    }
    return operations.Equal(ctxNoLogger, src, dst), false // note we're back to src/dst, not path1/path2
}
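operations.WithEqualFn is the extension point this file hangs off: whatever predicate is stored in the context decides equality for subsequent sync calls. A minimal registration sketch (the size-only predicate is a toy, not bisync's logic):

```go
// sketch: install a custom equality predicate via the context
var sizeOnlyEqual operations.EqualFn = func(ctx context.Context, src fs.ObjectInfo, dst fs.Object) bool {
    return src.Size() == dst.Size() // toy rule: matching sizes count as equal
}

func withToyEqual(ctx context.Context) context.Context {
    // sync/copy calls made with the returned context consult sizeOnlyEqual
    return operations.WithEqualFn(ctx, sizeOnlyEqual)
}
```
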
@ -25,13 +25,9 @@ import (
    "github.com/spf13/cobra"
)

// TestFunc allows mocking errors during tests
type TestFunc func()

// Options keep bisync options
type Options struct {
    Resync     bool   // whether or not this is a resync
    ResyncMode Prefer // which mode to use for resync
    Resync bool
    CheckAccess bool
    CheckFilename string
    CheckSync CheckSyncMode
@ -41,25 +37,11 @@ type Options struct {
    Force bool
    FiltersFile string
    Workdir string
    OrigBackupDir string
    BackupDir1 string
    BackupDir2 string
    DryRun bool
    NoCleanup bool
    SaveQueues bool // save extra debugging files (test only flag)
    IgnoreListingChecksum bool
    Resilient bool
    Recover bool
    TestFn TestFunc // test-only option, for mocking errors
    Compare CompareOpt
    CompareFlag string
    DebugName string
    MaxLock time.Duration
    ConflictResolve Prefer
    ConflictLoser ConflictLoserAction
    ConflictSuffixFlag string
    ConflictSuffix1 string
    ConflictSuffix2 string
}

// Default values
@ -117,14 +99,9 @@ func (x *CheckSyncMode) Type() string {
var Opt Options

func init() {
    Opt.MaxLock = 0
    cmd.Root.AddCommand(commandDefinition)
    cmdFlags := commandDefinition.Flags()
    // when adding new flags, remember to also update the rc params:
    // cmd/bisync/rc.go cmd/bisync/help.go (not docs/content/rc.md)
    // and the Command line syntax section of docs/content/bisync.md (it doesn't update automatically)
    flags.BoolVarP(cmdFlags, &Opt.Resync, "resync", "1", Opt.Resync, "Performs the resync run. Equivalent to --resync-mode path1. Consider using --verbose or --dry-run first.", "")
    flags.FVarP(cmdFlags, &Opt.ResyncMode, "resync-mode", "", "During resync, prefer the version that is: path1, path2, newer, older, larger, smaller (default: path1 if --resync, otherwise none for no resync.)", "")
    flags.BoolVarP(cmdFlags, &Opt.Resync, "resync", "1", Opt.Resync, "Performs the resync run. Path1 files may overwrite Path2 versions. Consider using --verbose or --dry-run first.", "")
    flags.BoolVarP(cmdFlags, &Opt.CheckAccess, "check-access", "", Opt.CheckAccess, makeHelp("Ensure expected {CHECKFILE} files are found on both Path1 and Path2 filesystems, else abort."), "")
    flags.StringVarP(cmdFlags, &Opt.CheckFilename, "check-filename", "", Opt.CheckFilename, makeHelp("Filename for --check-access (default: {CHECKFILE})"), "")
    flags.BoolVarP(cmdFlags, &Opt.Force, "force", "", Opt.Force, "Bypass --max-delete safety check and run the sync. Consider using with --verbose", "")
@ -133,24 +110,10 @@ func init() {
    flags.BoolVarP(cmdFlags, &Opt.RemoveEmptyDirs, "remove-empty-dirs", "", Opt.RemoveEmptyDirs, "Remove ALL empty directories at the final cleanup step.", "")
    flags.StringVarP(cmdFlags, &Opt.FiltersFile, "filters-file", "", Opt.FiltersFile, "Read filtering patterns from a file", "")
    flags.StringVarP(cmdFlags, &Opt.Workdir, "workdir", "", Opt.Workdir, makeHelp("Use custom working dir - useful for testing. (default: {WORKDIR})"), "")
    flags.StringVarP(cmdFlags, &Opt.BackupDir1, "backup-dir1", "", Opt.BackupDir1, "--backup-dir for Path1. Must be a non-overlapping path on the same remote.", "")
    flags.StringVarP(cmdFlags, &Opt.BackupDir2, "backup-dir2", "", Opt.BackupDir2, "--backup-dir for Path2. Must be a non-overlapping path on the same remote.", "")
    flags.StringVarP(cmdFlags, &Opt.DebugName, "debugname", "", Opt.DebugName, "Debug by tracking one file at various points throughout a bisync run (when -v or -vv)", "")
    flags.BoolVarP(cmdFlags, &tzLocal, "localtime", "", tzLocal, "Use local time in listings (default: UTC)", "")
    flags.BoolVarP(cmdFlags, &Opt.NoCleanup, "no-cleanup", "", Opt.NoCleanup, "Retain working files (useful for troubleshooting and testing).", "")
    flags.BoolVarP(cmdFlags, &Opt.IgnoreListingChecksum, "ignore-listing-checksum", "", Opt.IgnoreListingChecksum, "Do not use checksums for listings (add --ignore-checksum to additionally skip post-copy checksum checks)", "")
    flags.BoolVarP(cmdFlags, &Opt.Resilient, "resilient", "", Opt.Resilient, "Allow future runs to retry after certain less-serious errors, instead of requiring --resync. Use at your own risk!", "")
    flags.BoolVarP(cmdFlags, &Opt.Recover, "recover", "", Opt.Recover, "Automatically recover from interruptions without requiring --resync.", "")
    flags.StringVarP(cmdFlags, &Opt.CompareFlag, "compare", "", Opt.CompareFlag, "Comma-separated list of bisync-specific compare options ex. 'size,modtime,checksum' (default: 'size,modtime')", "")
    flags.BoolVarP(cmdFlags, &Opt.Compare.NoSlowHash, "no-slow-hash", "", Opt.Compare.NoSlowHash, "Ignore listing checksums only on backends where they are slow", "")
    flags.BoolVarP(cmdFlags, &Opt.Compare.SlowHashSyncOnly, "slow-hash-sync-only", "", Opt.Compare.SlowHashSyncOnly, "Ignore slow checksums for listings and deltas, but still consider them during sync calls.", "")
    flags.BoolVarP(cmdFlags, &Opt.Compare.DownloadHash, "download-hash", "", Opt.Compare.DownloadHash, "Compute hash by downloading when otherwise unavailable. (warning: may be slow and use lots of data!)", "")
    flags.DurationVarP(cmdFlags, &Opt.MaxLock, "max-lock", "", Opt.MaxLock, "Consider lock files older than this to be expired (default: 0 (never expire)) (minimum: 2m)", "")
    flags.FVarP(cmdFlags, &Opt.ConflictResolve, "conflict-resolve", "", "Automatically resolve conflicts by preferring the version that is: "+ConflictResolveList+" (default: none)", "")
    flags.FVarP(cmdFlags, &Opt.ConflictLoser, "conflict-loser", "", "Action to take on the loser of a sync conflict (when there is a winner) or on both files (when there is no winner): "+ConflictLoserList+" (default: num)", "")
    flags.StringVarP(cmdFlags, &Opt.ConflictSuffixFlag, "conflict-suffix", "", Opt.ConflictSuffixFlag, "Suffix to use when renaming a --conflict-loser. Can be either one string or two comma-separated strings to assign different suffixes to Path1/Path2. (default: 'conflict')", "")
    _ = cmdFlags.MarkHidden("debugname")
    _ = cmdFlags.MarkHidden("localtime")
}

// bisync command definition
@ -161,11 +124,8 @@ var commandDefinition = &cobra.Command{
    Annotations: map[string]string{
        "versionIntroduced": "v1.58",
        "groups":            "Filter,Copy,Important",
        "status":            "Beta",
    },
    RunE: func(command *cobra.Command, args []string) error {
        // NOTE: avoid putting too much handling here, as it won't apply to the rc.
        // Generally it's best to put init-type stuff in Bisync() (operations.go)
        cmd.CheckArgs(2, 2, command, args)
        fs1, file1, fs2, file2 := cmd.NewFsSrcDstFiles(args)
        if file1 != "" || file2 != "" {
@ -189,7 +149,7 @@ var commandDefinition = &cobra.Command{
        }
    }

    fs.Logf(nil, "bisync is IN BETA. Don't use in production!")
    fs.Logf(nil, "bisync is EXPERIMENTAL. Don't use in production!")
    cmd.Run(false, true, command, func() error {
        err := Bisync(ctx, fs1, fs2, &opt)
        if err == ErrBisyncAborted {
@ -1,309 +0,0 @@
package bisync

import (
    "context"
    "errors"
    "fmt"
    "strings"
    mutex "sync"
    "time"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/accounting"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/fs/operations"
    "github.com/rclone/rclone/lib/terminal"
)

// CompareOpt describes the Compare options in force
type CompareOpt = struct {
    Modtime          bool
    Size             bool
    Checksum         bool
    HashType1        hash.Type
    HashType2        hash.Type
    NoSlowHash       bool
    SlowHashSyncOnly bool
    SlowHashDetected bool
    DownloadHash     bool
}

func (b *bisyncRun) setCompareDefaults(ctx context.Context) error {
    ci := fs.GetConfig(ctx)

    // defaults
    b.opt.Compare.Size = true
    b.opt.Compare.Modtime = true
    b.opt.Compare.Checksum = false

    if ci.SizeOnly {
        b.opt.Compare.Size = true
        b.opt.Compare.Modtime = false
        b.opt.Compare.Checksum = false
    } else if ci.CheckSum && !b.opt.IgnoreListingChecksum {
        b.opt.Compare.Size = true
        b.opt.Compare.Modtime = false
        b.opt.Compare.Checksum = true
    }

    if ci.IgnoreSize {
        b.opt.Compare.Size = false
    }

    err := b.setFromCompareFlag(ctx)
    if err != nil {
        return err
    }

    if b.fs1.Features().SlowHash || b.fs2.Features().SlowHash {
        b.opt.Compare.SlowHashDetected = true
    }
    if b.opt.Compare.Checksum && !b.opt.IgnoreListingChecksum {
        b.setHashType(ci)
    }

    // Checks and Warnings
    if b.opt.Compare.SlowHashSyncOnly && b.opt.Compare.SlowHashDetected && b.opt.Resync {
        fs.Logf(nil, Color(terminal.Dim, "Ignoring checksums during --resync as --slow-hash-sync-only is set."))
        ci.CheckSum = false
        // note not setting b.opt.Compare.Checksum = false as we still want to build listings on the non-slow side, if any
    } else if b.opt.Compare.Checksum && !ci.CheckSum {
        fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Checksums will be compared for deltas but not during sync as --checksum is not set."))
    }
    if b.opt.Compare.Modtime && (b.fs1.Precision() == fs.ModTimeNotSupported || b.fs2.Precision() == fs.ModTimeNotSupported) {
        fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Modtime compare was requested but at least one remote does not support it. It is recommended to use --checksum or --size-only instead."))
    }
    if (ci.CheckSum || b.opt.Compare.Checksum) && b.opt.IgnoreListingChecksum {
        if (b.opt.Compare.HashType1 == hash.None || b.opt.Compare.HashType2 == hash.None) && !b.opt.Compare.DownloadHash {
            fs.Logf(nil, Color(terminal.YellowFg, `WARNING: Checksum compare was requested but at least one remote does not support checksums (or checksums are being ignored) and --ignore-listing-checksum is set.
            Ignoring Checksums globally and falling back to --compare modtime,size for sync. (Use --compare size or --size-only to ignore modtime). Path1 (%s): %s, Path2 (%s): %s`),
                b.fs1.String(), b.opt.Compare.HashType1.String(), b.fs2.String(), b.opt.Compare.HashType2.String())
            b.opt.Compare.Modtime = true
            b.opt.Compare.Size = true
            ci.CheckSum = false
            b.opt.Compare.Checksum = false
        } else {
            fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Ignoring checksum for deltas as --ignore-listing-checksum is set"))
            // note: --checksum will still affect the internal sync calls
        }
    }
    if !ci.CheckSum && !b.opt.Compare.Checksum && !b.opt.IgnoreListingChecksum {
        fs.Infof(nil, Color(terminal.Dim, "Setting --ignore-listing-checksum as neither --checksum nor --compare checksum are set."))
        b.opt.IgnoreListingChecksum = true
    }
    if !b.opt.Compare.Size && !b.opt.Compare.Modtime && !b.opt.Compare.Checksum {
        return errors.New(Color(terminal.RedFg, "must set a Compare method. (size, modtime, and checksum can't all be false.)"))
    }

    notSupported := func(label string, value bool, opt *bool) {
        if value {
            fs.Logf(nil, Color(terminal.YellowFg, "WARNING: %s is set but bisync does not support it. It will be ignored."), label)
            *opt = false
        }
    }
    notSupported("--update", ci.UpdateOlder, &ci.UpdateOlder)
    notSupported("--no-check-dest", ci.NoCheckDest, &ci.NoCheckDest)
    notSupported("--no-traverse", ci.NoTraverse, &ci.NoTraverse)
    // TODO: thorough search for other flags that should be on this list...

    prettyprint(b.opt.Compare, "Bisyncing with Comparison Settings", fs.LogLevelInfo)
    return nil
}

// returns true if the sizes are definitely different.
// returns false if equal, or if either is unknown.
func sizeDiffers(a, b int64) bool {
    if a < 0 || b < 0 {
        return false
    }
    return a != b
}

// returns true if the hashes are definitely different.
// returns false if equal, or if either is unknown.
func hashDiffers(a, b string, ht1, ht2 hash.Type, size1, size2 int64) bool {
    if a == "" || b == "" {
        if ht1 != hash.None && ht2 != hash.None && !(size1 <= 0 || size2 <= 0) {
            fs.Logf(nil, Color(terminal.YellowFg, "WARNING: hash unexpectedly blank despite Fs support (%s, %s) (you may need to --resync!)"), a, b)
        }
        return false
    }
    if ht1 != ht2 {
        if !(downloadHash && ((ht1 == hash.MD5 && ht2 == hash.None) || (ht1 == hash.None && ht2 == hash.MD5))) {
            fs.Infof(nil, Color(terminal.YellowFg, "WARNING: Can't compare hashes of different types (%s, %s)"), ht1.String(), ht2.String())
            return false
        }
    }
    return a != b
}

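Both predicates answer "definitely different?" rather than "equal?", so an unknown value can never create a spurious delta. A tiny worked example of that convention for sizeDiffers:

```go
package main

import "fmt"

// sizeDiffers, as above: unknown sizes (< 0) never count as different.
func sizeDiffers(a, b int64) bool {
    if a < 0 || b < 0 {
        return false
    }
    return a != b
}

func main() {
    fmt.Println(sizeDiffers(100, 200)) // true: both known, unequal
    fmt.Println(sizeDiffers(100, 100)) // false: equal
    fmt.Println(sizeDiffers(-1, 200))  // false: -1 means unknown, so no verdict
}
```
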
// chooses hash type, giving priority to types both sides have in common
func (b *bisyncRun) setHashType(ci *fs.ConfigInfo) {
    downloadHash = b.opt.Compare.DownloadHash
    if b.opt.Compare.NoSlowHash && b.opt.Compare.SlowHashDetected {
        fs.Infof(nil, "Not checking for common hash as at least one slow hash detected.")
    } else {
        common := b.fs1.Hashes().Overlap(b.fs2.Hashes())
        if common.Count() > 0 && common.GetOne() != hash.None {
            ht := common.GetOne()
            b.opt.Compare.HashType1 = ht
            b.opt.Compare.HashType2 = ht
            if !b.opt.Compare.SlowHashSyncOnly || !b.opt.Compare.SlowHashDetected {
                return
            }
        } else if b.opt.Compare.SlowHashSyncOnly && b.opt.Compare.SlowHashDetected {
            fs.Logf(b.fs2, Color(terminal.YellowFg, "Ignoring --slow-hash-sync-only and falling back to --no-slow-hash as Path1 and Path2 have no hashes in common."))
            b.opt.Compare.SlowHashSyncOnly = false
            b.opt.Compare.NoSlowHash = true
            ci.CheckSum = false
        }
    }

    if !b.opt.Compare.DownloadHash && !b.opt.Compare.SlowHashSyncOnly {
        fs.Logf(b.fs2, Color(terminal.YellowFg, "--checksum is in use but Path1 and Path2 have no hashes in common; falling back to --compare modtime,size for sync. (Use --compare size or --size-only to ignore modtime)"))
        fs.Infof("Path1 hashes", "%v", b.fs1.Hashes().String())
        fs.Infof("Path2 hashes", "%v", b.fs2.Hashes().String())
        b.opt.Compare.Modtime = true
        b.opt.Compare.Size = true
        ci.CheckSum = false
    }
    if (b.opt.Compare.NoSlowHash || b.opt.Compare.SlowHashSyncOnly) && b.fs1.Features().SlowHash {
        fs.Infof(nil, Color(terminal.YellowFg, "Slow hash detected on Path1. Will ignore checksum due to slow-hash settings"))
        b.opt.Compare.HashType1 = hash.None
    } else {
        b.opt.Compare.HashType1 = b.fs1.Hashes().GetOne()
        if b.opt.Compare.HashType1 != hash.None {
            fs.Logf(b.fs1, Color(terminal.YellowFg, "will use %s for same-side diffs on Path1 only"), b.opt.Compare.HashType1)
        }
    }
    if (b.opt.Compare.NoSlowHash || b.opt.Compare.SlowHashSyncOnly) && b.fs2.Features().SlowHash {
        fs.Infof(nil, Color(terminal.YellowFg, "Slow hash detected on Path2. Will ignore checksum due to slow-hash settings"))
        b.opt.Compare.HashType2 = hash.None
    } else {
        b.opt.Compare.HashType2 = b.fs2.Hashes().GetOne()
        if b.opt.Compare.HashType2 != hash.None {
            fs.Logf(b.fs2, Color(terminal.YellowFg, "will use %s for same-side diffs on Path2 only"), b.opt.Compare.HashType2)
        }
    }
    if b.opt.Compare.HashType1 == hash.None && b.opt.Compare.HashType2 == hash.None && !b.opt.Compare.DownloadHash {
        fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Ignoring checksums globally as hashes are ignored or unavailable on both sides."))
        b.opt.Compare.Checksum = false
        ci.CheckSum = false
        b.opt.IgnoreListingChecksum = true
    }
}

// returns true if the times are definitely different (by more than the modify window).
// returns false if equal, within modify window, or if either is unknown.
// considers precision per-Fs.
func timeDiffers(ctx context.Context, a, b time.Time, fsA, fsB fs.Info) bool {
    modifyWindow := fs.GetModifyWindow(ctx, fsA, fsB)
    if modifyWindow == fs.ModTimeNotSupported {
        return false
    }
    if a.IsZero() || b.IsZero() {
        fs.Logf(fsA, "Fs supports modtime, but modtime is missing")
        return false
    }
    dt := b.Sub(a)
    if dt < modifyWindow && dt > -modifyWindow {
        fs.Debugf(a, "modification time the same (differ by %s, within tolerance %s)", dt, modifyWindow)
        return false
    }

    fs.Debugf(a, "Modification times differ by %s: %v, %v", dt, a, b)
    return true
}

func (b *bisyncRun) setFromCompareFlag(ctx context.Context) error {
    if b.opt.CompareFlag == "" {
        return nil
    }
    var CompareFlag CompareOpt // for exclusions
    opts := strings.Split(b.opt.CompareFlag, ",")
    for _, opt := range opts {
        switch strings.ToLower(strings.TrimSpace(opt)) {
        case "size":
            b.opt.Compare.Size = true
            CompareFlag.Size = true
        case "modtime":
            b.opt.Compare.Modtime = true
            CompareFlag.Modtime = true
        case "checksum":
            b.opt.Compare.Checksum = true
            CompareFlag.Checksum = true
        default:
            return fmt.Errorf(Color(terminal.RedFg, "unknown compare option: %s (must be size, modtime, or checksum)"), opt)
        }
    }

    // exclusions (override defaults, only if --compare != "")
    if !CompareFlag.Size {
        b.opt.Compare.Size = false
    }
    if !CompareFlag.Modtime {
        b.opt.Compare.Modtime = false
    }
    if !CompareFlag.Checksum {
        b.opt.Compare.Checksum = false
    }

    // override sync flags to match
    ci := fs.GetConfig(ctx)
    if b.opt.Compare.Checksum {
        ci.CheckSum = true
    }
    if b.opt.Compare.Modtime && !b.opt.Compare.Checksum {
        ci.CheckSum = false
    }
    if !b.opt.Compare.Size {
        ci.IgnoreSize = true
    }
    if !b.opt.Compare.Modtime {
        ci.UseServerModTime = true
    }
    if b.opt.Compare.Size && !b.opt.Compare.Modtime && !b.opt.Compare.Checksum {
        ci.SizeOnly = true
    }

    return nil
}

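The flag is therefore both additive and exclusive: every named check switches on, and anything left unnamed switches off, overriding the built-in defaults. For instance `--compare size,checksum` ends with Size and Checksum true but Modtime false, after which the sync flags follow (ci.CheckSum=true, ci.UseServerModTime=true). The inclusion/exclusion pass in miniature:

```go
package main

import (
    "fmt"
    "strings"
)

func main() {
    set := map[string]bool{}
    for _, o := range strings.Split("size,checksum", ",") {
        set[strings.ToLower(strings.TrimSpace(o))] = true
    }
    size, modtime, checksum := set["size"], set["modtime"], set["checksum"]
    fmt.Println(size, modtime, checksum) // true false true: unnamed modtime is excluded
}
```
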
// downloadHash is true if we should attempt to compute hash by downloading when otherwise unavailable
|
||||
var downloadHash bool
|
||||
var downloadHashWarn mutex.Once
|
||||
var firstDownloadHash mutex.Once
|
||||
|
||||
func tryDownloadHash(ctx context.Context, o fs.DirEntry, hashVal string) (string, error) {
|
||||
if hashVal != "" || !downloadHash {
|
||||
return hashVal, nil
|
||||
}
|
||||
obj, ok := o.(fs.Object)
|
||||
if !ok {
|
||||
fs.Infof(o, "failed to download hash -- not an fs.Object")
|
||||
return hashVal, fs.ErrorObjectNotFound
|
||||
}
|
||||
if o.Size() < 0 {
|
||||
downloadHashWarn.Do(func() {
|
||||
fs.Logf(o, Color(terminal.YellowFg, "Skipping hash download as checksum not reliable with files of unknown length."))
|
||||
})
|
||||
fs.Debugf(o, "Skipping hash download as checksum not reliable with files of unknown length.")
|
||||
return hashVal, hash.ErrUnsupported
|
||||
}
|
||||
|
||||
firstDownloadHash.Do(func() {
|
||||
fs.Infof(obj.Fs().Name(), Color(terminal.Dim, "Downloading hashes..."))
|
||||
})
|
||||
tr := accounting.Stats(ctx).NewCheckingTransfer(o, "computing hash with --download-hash")
|
||||
defer func() {
|
||||
tr.Done(ctx, nil)
|
||||
}()
|
||||
|
||||
sum, err := operations.HashSum(ctx, hash.MD5, false, true, obj)
|
||||
if err != nil {
|
||||
fs.Infof(o, "DownloadHash -- hash: %v, err: %v", sum, err)
|
||||
} else {
|
||||
fs.Debugf(o, "DownloadHash -- hash: %v", sum)
|
||||
}
|
||||
return sum, err
|
||||
}
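
// Editor's note (sketch, not part of the diff): downloadHashWarn and
// firstDownloadHash are sync.Once values, so each message above is logged at
// most once per run no matter how many objects hit this path:
//
//	var once sync.Once
//	for i := 0; i < 3; i++ {
//		once.Do(func() { fmt.Println("printed exactly once") })
//	}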

@@ -3,18 +3,19 @@
package bisync

import (
	"bytes"
	"context"
	"fmt"
	"path/filepath"
	"sort"
	"strings"
	"time"

	"github.com/rclone/rclone/cmd/bisync/bilib"
	"github.com/rclone/rclone/cmd/check"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/filter"
	"github.com/rclone/rclone/lib/terminal"
	"golang.org/x/text/unicode/norm"
	"github.com/rclone/rclone/fs/operations"
)

// delta

@@ -25,17 +26,14 @@ const (
	deltaNew delta = 1 << iota
	deltaNewer
	deltaOlder
	deltaLarger
	deltaSmaller
	deltaSize
	deltaHash
	deltaDeleted
)

const (
	deltaSize     delta = deltaLarger | deltaSmaller
	deltaTime     delta = deltaNewer | deltaOlder
	deltaModified delta = deltaTime | deltaSize | deltaHash
	deltaOther    delta = deltaNew | deltaTime | deltaSize | deltaHash
	deltaModified delta = deltaNewer | deltaOlder | deltaSize | deltaHash | deltaDeleted
	deltaOther    delta = deltaNew | deltaNewer | deltaOlder
)
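
// Editor's sketch (not part of the diff): deltas are bitflags, so one value
// can carry several conditions at once and is() tests them with a mask:
//
//	d := deltaNewer | deltaLarger
//	d.is(deltaModified) // true  -- deltaModified includes deltaNewer
//	d.is(deltaDeleted)  // false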

func (d delta) is(cond delta) bool {

@@ -45,9 +43,6 @@ func (d delta) is(cond delta) bool {
// deltaSet
type deltaSet struct {
	deltas map[string]delta
	size   map[string]int64
	time   map[string]time.Time
	hash   map[string]string
	opt    *Options
	fs     fs.Fs  // base filesystem
	msg    string // filesystem name for logging

@@ -79,77 +74,71 @@ func (ds *deltaSet) printStats() {
	}
	nAll := len(ds.deltas)
	nNew := 0
	nMod := 0
	nTime := 0
	nNewer := 0
	nOlder := 0
	nSize := 0
	nLarger := 0
	nSmaller := 0
	nHash := 0
	nDeleted := 0
	for _, d := range ds.deltas {
		if d.is(deltaNew) {
			nNew++
		}
		if d.is(deltaModified) {
			nMod++
		}
		if d.is(deltaTime) {
			nTime++
		}
		if d.is(deltaNewer) {
			nNewer++
		}
		if d.is(deltaOlder) {
			nOlder++
		}
		if d.is(deltaSize) {
			nSize++
		}
		if d.is(deltaLarger) {
			nLarger++
		}
		if d.is(deltaSmaller) {
			nSmaller++
		}
		if d.is(deltaHash) {
			nHash++
		}
		if d.is(deltaDeleted) {
			nDeleted++
		}
	}
	if nAll != nNew+nMod+nDeleted {
		fs.Errorf(nil, "something doesn't add up! %4d != %4d + %4d + %4d", nAll, nNew, nMod, nDeleted)
	}
	fs.Infof(nil, "%s: %4d changes: "+Color(terminal.GreenFg, "%4d new")+", "+Color(terminal.YellowFg, "%4d modified")+", "+Color(terminal.RedFg, "%4d deleted"),
		ds.msg, nAll, nNew, nMod, nDeleted)
	if nMod > 0 {
		details := []string{}
		if nTime > 0 {
			details = append(details, fmt.Sprintf(Color(terminal.CyanFg, "%4d newer"), nNewer))
			details = append(details, fmt.Sprintf(Color(terminal.BlueFg, "%4d older"), nOlder))
	fs.Infof(nil, "%s: %4d changes: %4d new, %4d newer, %4d older, %4d deleted",
		ds.msg, nAll, nNew, nNewer, nOlder, nDeleted)
}

// check potential conflicts (to avoid renaming if already identical)
func (b *bisyncRun) checkconflicts(ctxCheck context.Context, filterCheck *filter.Filter, fs1, fs2 fs.Fs) (bilib.Names, error) {
	matches := bilib.Names{}
	if filterCheck.HaveFilesFrom() {
		fs.Debugf(nil, "There are potential conflicts to check.")

		opt, close, checkopterr := check.GetCheckOpt(b.fs1, b.fs2)
		if checkopterr != nil {
			b.critical = true
			b.retryable = true
			fs.Debugf(nil, "GetCheckOpt error: %v", checkopterr)
			return matches, checkopterr
		}
		if nSize > 0 {
			details = append(details, fmt.Sprintf(Color(terminal.CyanFg, "%4d larger"), nLarger))
			details = append(details, fmt.Sprintf(Color(terminal.BlueFg, "%4d smaller"), nSmaller))
		defer close()

		opt.Match = new(bytes.Buffer)

		// TODO: consider using custom CheckFn to act like cryptcheck, if either fs is a crypt remote and -c has been passed
		// note that cryptCheck() is not currently exported

		fs.Infof(nil, "Checking potential conflicts...")
		check := operations.Check(ctxCheck, opt)
		fs.Infof(nil, "Finished checking the potential conflicts. %s", check)

		// reset error count, because we don't want to count check errors as bisync errors
		accounting.Stats(ctxCheck).ResetErrors()

		// return the list of identical files to check against later
		if len(fmt.Sprint(opt.Match)) > 0 {
			matches = bilib.ToNames(strings.Split(fmt.Sprint(opt.Match), "\n"))
		}
		if nHash > 0 {
			details = append(details, fmt.Sprintf(Color(terminal.CyanFg, "%4d hash differs"), nHash))
		}
		if (nNewer+nOlder != nTime) || (nLarger+nSmaller != nSize) || (nMod > nTime+nSize+nHash) {
			fs.Errorf(nil, "something doesn't add up!")
		if matches.NotEmpty() {
			fs.Debugf(nil, "The following potential conflicts were determined to be identical. %v", matches)
		} else {
			fs.Debugf(nil, "None of the conflicts were determined to be identical.")
		}

		fs.Infof(nil, "(%s: %s)", Color(terminal.YellowFg, "Modified"), strings.Join(details, ", "))
	}
	return matches, nil
}

// findDeltas
func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing string, now *fileList, msg string) (ds *deltaSet, err error) {
	var old *fileList
	newListing := oldListing + "-new"
func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing, newListing, msg string) (ds *deltaSet, err error) {
	var old, now *fileList

	old, err = b.loadListing(oldListing)
	if err != nil {
@@ -161,6 +150,7 @@ func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing string,
		return
	}

	now, err = b.makeListing(fctx, f, newListing)
	if err == nil {
		err = b.checkListing(now, newListing, "current "+msg)
	}

@@ -170,9 +160,6 @@ func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing string,
	ds = &deltaSet{
		deltas:   map[string]delta{},
		size:     map[string]int64{},
		time:     map[string]time.Time{},
		hash:     map[string]string{},
		fs:       f,
		msg:      msg,
		oldCount: len(old.list),
@@ -181,75 +168,26 @@ func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing string,
	}

	for _, file := range old.list {
		// REMEMBER: this section is only concerned with comparing listings from the same side (not different sides)
		d := deltaZero
		s := int64(0)
		h := ""
		var t time.Time
		if !now.has(file) {
			b.indent(msg, file, Color(terminal.RedFg, "File was deleted"))
			b.indent(msg, file, "File was deleted")
			ds.deleted++
			d |= deltaDeleted
		} else {
			// skip dirs here, as we only care if they are new/deleted, not newer/older
			if !now.isDir(file) {
				whatchanged := []string{}
				if b.opt.Compare.Size {
					if sizeDiffers(old.getSize(file), now.getSize(file)) {
						fs.Debugf(file, "(old: %v current: %v)", old.getSize(file), now.getSize(file))
						if now.getSize(file) > old.getSize(file) {
							whatchanged = append(whatchanged, Color(terminal.MagentaFg, "size (larger)"))
							d |= deltaLarger
						} else {
							whatchanged = append(whatchanged, Color(terminal.MagentaFg, "size (smaller)"))
							d |= deltaSmaller
						}
						s = now.getSize(file)
					}
				}
				if b.opt.Compare.Modtime {
					if timeDiffers(fctx, old.getTime(file), now.getTime(file), f, f) {
						if old.beforeOther(now, file) {
							fs.Debugf(file, "(old: %v current: %v)", old.getTime(file), now.getTime(file))
							whatchanged = append(whatchanged, Color(terminal.MagentaFg, "time (newer)"))
							d |= deltaNewer
						} else { // Current version is older than prior sync.
							fs.Debugf(file, "(old: %v current: %v)", old.getTime(file), now.getTime(file))
							whatchanged = append(whatchanged, Color(terminal.MagentaFg, "time (older)"))
							d |= deltaOlder
						}
						t = now.getTime(file)
					}
				}
				if b.opt.Compare.Checksum {
					if hashDiffers(old.getHash(file), now.getHash(file), old.hash, now.hash, old.getSize(file), now.getSize(file)) {
						fs.Debugf(file, "(old: %v current: %v)", old.getHash(file), now.getHash(file))
						whatchanged = append(whatchanged, Color(terminal.MagentaFg, "hash"))
						d |= deltaHash
						h = now.getHash(file)
					}
				}
				// concat changes and print log
				if d.is(deltaModified) {
					summary := fmt.Sprintf(Color(terminal.YellowFg, "File changed: %s"), strings.Join(whatchanged, ", "))
					b.indent(msg, file, summary)
				if old.getTime(file) != now.getTime(file) {
					if old.beforeOther(now, file) {
						b.indent(msg, file, "File is newer")
						d |= deltaNewer
					} else { // Current version is older than prior sync.
						b.indent(msg, file, "File is OLDER")
						d |= deltaOlder
					}
				}
				// TODO Compare sizes and hashes
			}

			if d.is(deltaModified) {
				ds.deltas[file] = d
				if b.opt.Compare.Size {
					ds.size[file] = s
				}
				if b.opt.Compare.Modtime {
					ds.time[file] = t
				}
				if b.opt.Compare.Checksum {
					ds.hash[file] = h
				}
			} else if d.is(deltaDeleted) {
				ds.deltas[file] = d
			} else {
				// Once we've found at least one unchanged file,
				// we know that not everything has changed,
@@ -260,17 +198,8 @@ func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing string,

	for _, file := range now.list {
		if !old.has(file) {
			b.indent(msg, file, Color(terminal.GreenFg, "File is new"))
			b.indent(msg, file, "File is new")
			ds.deltas[file] = deltaNew
			if b.opt.Compare.Size {
				ds.size[file] = now.getSize(file)
			}
			if b.opt.Compare.Modtime {
				ds.time[file] = now.getTime(file)
			}
			if b.opt.Compare.Checksum {
				ds.hash[file] = now.getHash(file)
			}
		}
	}

@@ -288,7 +217,7 @@ func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing string,
}

// applyDeltas
func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (changes1, changes2 bool, results2to1, results1to2 []Results, queues queues, err error) {
func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (changes1, changes2 bool, err error) {
	path1 := bilib.FsPath(b.fs1)
	path2 := bilib.FsPath(b.fs2)

@@ -297,17 +226,9 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
	delete1 := bilib.Names{}
	delete2 := bilib.Names{}
	handled := bilib.Names{}
	renameSkipped := bilib.Names{}
	deletedonboth := bilib.Names{}
	skippedDirs1 := newFileList()
	skippedDirs2 := newFileList()
	b.renames = renames{}

	ctxMove := b.opt.setDryRun(ctx)

	// update AliasMap for deleted files, as march does not know about them
	b.updateAliases(ctx, ds1, ds2)

	// efficient isDir check
	// we load the listing just once and store only the dirs
	dirs1, dirs1Err := b.listDirsOnly(1)

@@ -338,32 +259,14 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
	ctxCheck, filterCheck := filter.AddConfig(ctxNew)

	for _, file := range ds1.sort() {
		alias := b.aliases.Alias(file)
		d1 := ds1.deltas[file]
		if d1.is(deltaOther) {
			d2, in2 := ds2.deltas[file]
			file2 := file
			if !in2 && file != alias {
				d2 = ds2.deltas[alias]
				file2 = alias
			}
			d2 := ds2.deltas[file]
			if d2.is(deltaOther) {
				// if size or hash differ, skip this, as we already know they're not equal
				if (b.opt.Compare.Size && sizeDiffers(ds1.size[file], ds2.size[file2])) ||
					(b.opt.Compare.Checksum && hashDiffers(ds1.hash[file], ds2.hash[file2], b.opt.Compare.HashType1, b.opt.Compare.HashType2, ds1.size[file], ds2.size[file2])) {
					fs.Debugf(file, "skipping equality check as size/hash definitely differ")
				if err := filterCheck.AddFile(file); err != nil {
					fs.Debugf(nil, "Non-critical error adding file to list of potential conflicts to check: %s", err)
				} else {
					checkit := func(filename string) {
						if err := filterCheck.AddFile(filename); err != nil {
							fs.Debugf(nil, "Non-critical error adding file to list of potential conflicts to check: %s", err)
						} else {
							fs.Debugf(nil, "Added file to list of potential conflicts to check: %s", filename)
						}
					}
					checkit(file)
					if file != alias {
						checkit(alias)
					}
					fs.Debugf(nil, "Added file to list of potential conflicts to check: %s", file)
				}
			}
		}
@@ -373,17 +276,12 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
	matches, err := b.checkconflicts(ctxCheck, filterCheck, b.fs1, b.fs2)

	for _, file := range ds1.sort() {
		alias := b.aliases.Alias(file)
		p1 := path1 + file
		p2 := path2 + alias
		p2 := path2 + file
		d1 := ds1.deltas[file]

		if d1.is(deltaOther) {
			d2, in2 := ds2.deltas[file]
			// try looking under alternate name
			if !in2 && file != alias {
				d2, in2 = ds2.deltas[alias]
			}
			if !in2 {
				b.indent("Path1", p2, "Queue copy to Path2")
				copy1to2.Add(file)
@@ -395,46 +293,30 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
				b.indent("!WARNING", file, "New or changed in both paths")

				// if files are identical, leave them alone instead of renaming
				if (dirs1.has(file) || dirs1.has(alias)) && (dirs2.has(file) || dirs2.has(alias)) {
					fs.Infof(nil, "This is a directory, not a file. Skipping equality check and will not rename: %s", file)
					ls1.getPut(file, skippedDirs1)
					ls2.getPut(file, skippedDirs2)
					b.debugFn(file, func() {
						b.debug(file, fmt.Sprintf("deltas dir: %s, ls1 has name?: %v, ls2 has name?: %v", file, ls1.has(b.DebugName), ls2.has(b.DebugName)))
					})
				if dirs1.has(file) && dirs2.has(file) {
					fs.Debugf(nil, "This is a directory, not a file. Skipping equality check and will not rename: %s", file)
				} else {
					equal := matches.Has(file)
					if !equal {
						equal = matches.Has(alias)
					}
					if equal {
						if ciCheck.FixCase && file != alias {
							// the content is equal but filename still needs to be FixCase'd, so copy1to2
							// the Path1 version is deemed "correct" in this scenario
							fs.Infof(alias, "Files are equal but will copy anyway to fix case to %s", file)
							copy1to2.Add(file)
						} else if b.opt.Compare.Modtime && timeDiffers(ctx, ls1.getTime(ls1.getTryAlias(file, alias)), ls2.getTime(ls2.getTryAlias(file, alias)), b.fs1, b.fs2) {
							fs.Infof(file, "Files are equal but will copy anyway to update modtime (will not rename)")
							if ls1.getTime(ls1.getTryAlias(file, alias)).Before(ls2.getTime(ls2.getTryAlias(file, alias))) {
								// Path2 is newer
								b.indent("Path2", p1, "Queue copy to Path1")
								copy2to1.Add(ls2.getTryAlias(file, alias))
							} else {
								// Path1 is newer
								b.indent("Path1", p2, "Queue copy to Path2")
								copy1to2.Add(ls1.getTryAlias(file, alias))
							}
						} else {
							fs.Infof(nil, "Files are equal! Skipping: %s", file)
							renameSkipped.Add(file)
							renameSkipped.Add(alias)
						}
						fs.Infof(nil, "Files are equal! Skipping: %s", file)
					} else {
						fs.Debugf(nil, "Files are NOT equal: %s", file)
						err = b.resolve(ctxMove, path1, path2, file, alias, &renameSkipped, &copy1to2, &copy2to1, ds1, ds2)
						if err != nil {
							b.indent("!Path1", p1+"..path1", "Renaming Path1 copy")
							if err = operations.MoveFile(ctxMove, b.fs1, b.fs1, file+"..path1", file); err != nil {
								err = fmt.Errorf("path1 rename failed for %s: %w", p1, err)
								b.critical = true
								return
							}
							b.indent("!Path1", p2+"..path1", "Queue copy to Path2")
							copy1to2.Add(file + "..path1")

							b.indent("!Path2", p2+"..path2", "Renaming Path2 copy")
							if err = operations.MoveFile(ctxMove, b.fs2, b.fs2, file+"..path2", file); err != nil {
								err = fmt.Errorf("path2 rename failed for %s: %w", file, err)
								return
							}
							b.indent("!Path2", p1+"..path2", "Queue copy to Path1")
							copy2to1.Add(file + "..path2")
						}
					}
					handled.Add(file)
@@ -442,37 +324,24 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
		} else {
			// Path1 deleted
			d2, in2 := ds2.deltas[file]
			// try looking under alternate name
			fs.Debugf(file, "alias: %s, in2: %v", alias, in2)
			if !in2 && file != alias {
				fs.Debugf(file, "looking for alias: %s", alias)
				d2, in2 = ds2.deltas[alias]
				if in2 {
					fs.Debugf(file, "detected alias: %s", alias)
				}
			}
			if !in2 {
				b.indent("Path2", p2, "Queue delete")
				delete2.Add(file)
				copy1to2.Add(file)
			} else if d2.is(deltaOther) {
				b.indent("Path2", p1, "Queue copy to Path1")
				copy2to1.Add(file)
				handled.Add(file)
			} else if d2.is(deltaDeleted) {
				handled.Add(file)
				deletedonboth.Add(file)
				deletedonboth.Add(alias)
			}
		}
	}

	for _, file := range ds2.sort() {
		alias := b.aliases.Alias(file)
		p1 := path1 + alias
		p1 := path1 + file
		d2 := ds2.deltas[file]

		if handled.Has(file) || handled.Has(alias) {
		if handled.Has(file) {
			continue
		}
		if d2.is(deltaOther) {
@@ -482,67 +351,57 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
			// Deleted
			b.indent("Path1", p1, "Queue delete")
			delete1.Add(file)
			copy2to1.Add(file)
		}
	}

	// Do the batch operation
	if copy2to1.NotEmpty() && !b.InGracefulShutdown {
	if copy2to1.NotEmpty() {
		changes1 = true
		b.indent("Path2", "Path1", "Do queued copies to")
		ctx = b.setBackupDir(ctx, 1)
		results2to1, err = b.fastCopy(ctx, b.fs2, b.fs1, copy2to1, "copy2to1")

		// retries, if any
		results2to1, err = b.retryFastCopy(ctx, b.fs2, b.fs1, copy2to1, "copy2to1", results2to1, err)

		if !b.InGracefulShutdown && err != nil {
		err = b.fastCopy(ctx, b.fs2, b.fs1, copy2to1, "copy2to1")
		if err != nil {
			return
		}

		// copy empty dirs from path2 to path1 (if --create-empty-src-dirs)
		b.syncEmptyDirs(ctx, b.fs1, copy2to1, dirs2, &results2to1, "make")
		b.syncEmptyDirs(ctx, b.fs1, copy2to1, dirs2, "make")
	}

	if copy1to2.NotEmpty() && !b.InGracefulShutdown {
	if copy1to2.NotEmpty() {
		changes2 = true
		b.indent("Path1", "Path2", "Do queued copies to")
		ctx = b.setBackupDir(ctx, 2)
		results1to2, err = b.fastCopy(ctx, b.fs1, b.fs2, copy1to2, "copy1to2")

		// retries, if any
		results1to2, err = b.retryFastCopy(ctx, b.fs1, b.fs2, copy1to2, "copy1to2", results1to2, err)

		if !b.InGracefulShutdown && err != nil {
		err = b.fastCopy(ctx, b.fs1, b.fs2, copy1to2, "copy1to2")
		if err != nil {
			return
		}

		// copy empty dirs from path1 to path2 (if --create-empty-src-dirs)
		b.syncEmptyDirs(ctx, b.fs2, copy1to2, dirs1, &results1to2, "make")
		b.syncEmptyDirs(ctx, b.fs2, copy1to2, dirs1, "make")
	}

	if delete1.NotEmpty() && !b.InGracefulShutdown {
		if err = b.saveQueue(delete1, "delete1"); err != nil {
	if delete1.NotEmpty() {
		changes1 = true
		b.indent("", "Path1", "Do queued deletes on")
		err = b.fastDelete(ctx, b.fs1, delete1, "delete1")
		if err != nil {
			return
		}

		// propagate deletions of empty dirs from path2 to path1 (if --create-empty-src-dirs)
		b.syncEmptyDirs(ctx, b.fs1, delete1, dirs1, &results2to1, "remove")
		b.syncEmptyDirs(ctx, b.fs1, delete1, dirs1, "remove")
	}

	if delete2.NotEmpty() && !b.InGracefulShutdown {
		if err = b.saveQueue(delete2, "delete2"); err != nil {
	if delete2.NotEmpty() {
		changes2 = true
		b.indent("", "Path2", "Do queued deletes on")
		err = b.fastDelete(ctx, b.fs2, delete2, "delete2")
		if err != nil {
			return
		}
		// propagate deletions of empty dirs from path1 to path2 (if --create-empty-src-dirs)
		b.syncEmptyDirs(ctx, b.fs2, delete2, dirs2, &results1to2, "remove")
	}

	queues.copy1to2 = copy1to2
	queues.copy2to1 = copy2to1
	queues.renameSkipped = renameSkipped
	queues.deletedonboth = deletedonboth
	queues.skippedDirs1 = skippedDirs1
	queues.skippedDirs2 = skippedDirs2
		// propagate deletions of empty dirs from path1 to path2 (if --create-empty-src-dirs)
		b.syncEmptyDirs(ctx, b.fs2, delete2, dirs2, "remove")
	}

	return
}

@@ -565,65 +424,3 @@ func (ds *deltaSet) excessDeletes() bool {
		maxDelete, ds.deleted, ds.oldCount, ds.msg, quotePath(bilib.FsPath(ds.fs)))
	return true
}

// normally we build the AliasMap from march results,
// however, march does not know about deleted files, so need to manually check them for aliases
func (b *bisyncRun) updateAliases(ctx context.Context, ds1, ds2 *deltaSet) {
	ci := fs.GetConfig(ctx)
	// skip if not needed
	if ci.NoUnicodeNormalization && !ci.IgnoreCaseSync && !b.fs1.Features().CaseInsensitive && !b.fs2.Features().CaseInsensitive {
		return
	}
	if ds1.deleted < 1 && ds2.deleted < 1 {
		return
	}

	fs.Debugf(nil, "Updating AliasMap")

	transform := func(s string) string {
		if !ci.NoUnicodeNormalization {
			s = norm.NFC.String(s)
		}
		// note: march only checks the dest, but we check both here
		if ci.IgnoreCaseSync || b.fs1.Features().CaseInsensitive || b.fs2.Features().CaseInsensitive {
			s = strings.ToLower(s)
		}
		return s
	}

	delMap1 := map[string]string{}  // [transformedname]originalname
	delMap2 := map[string]string{}  // [transformedname]originalname
	fullMap1 := map[string]string{} // [transformedname]originalname
	fullMap2 := map[string]string{} // [transformedname]originalname

	for _, name := range ls1.list {
		fullMap1[transform(name)] = name
	}
	for _, name := range ls2.list {
		fullMap2[transform(name)] = name
	}

	addDeletes := func(ds *deltaSet, delMap, fullMap map[string]string) {
		for _, file := range ds.sort() {
			d := ds.deltas[file]
			if d.is(deltaDeleted) {
				delMap[transform(file)] = file
				fullMap[transform(file)] = file
			}
		}
	}
	addDeletes(ds1, delMap1, fullMap1)
	addDeletes(ds2, delMap2, fullMap2)

	addAliases := func(delMap, fullMap map[string]string) {
		for transformedname, name := range delMap {
			matchedName, found := fullMap[transformedname]
			if found && name != matchedName {
				fs.Debugf(name, "adding alias %s", matchedName)
				b.aliases.Add(name, matchedName)
			}
		}
	}
	addAliases(delMap1, fullMap2)
	addAliases(delMap2, fullMap1)
}
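
// Editor's sketch (hypothetical name, not part of the diff): with case and
// unicode handling active, transform above maps variant spellings to one
// key, which is how a deleted NFC "Caf\u00e9" gets aliased to a surviving
// NFD "Cafe\u0301" on the other side:
//
//	transform("Cafe\u0301") == transform("Caf\u00e9") // both become "caf\u00e9"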


@@ -10,7 +10,7 @@ func makeHelp(help string) string {
		"|", "`",
		"{MAXDELETE}", strconv.Itoa(DefaultMaxDelete),
		"{CHECKFILE}", DefaultCheckFilename,
		// "{WORKDIR}", DefaultWorkdir,
		"{WORKDIR}", DefaultWorkdir,
	)
	return replacer.Replace(help)
}

@@ -37,9 +37,7 @@ var rcHelp = makeHelp(`This takes the following parameters
- ignoreListingChecksum - Do not use checksums for listings
- resilient - Allow future runs to retry after certain less-serious errors, instead of requiring resync.
  Use at your own risk!
- workdir - server directory for history files (default: |~/.cache/rclone/bisync|)
- backupdir1 - --backup-dir for Path1. Must be a non-overlapping path on the same remote.
- backupdir2 - --backup-dir for Path2. Must be a non-overlapping path on the same remote.
- workdir - server directory for history files (default: {WORKDIR})
- noCleanup - retain working files

See [bisync command help](https://rclone.org/commands/rclone_bisync/)

@@ -56,10 +54,5 @@ On each successive run it will:
Changes include |New|, |Newer|, |Older|, and |Deleted| files.
- Propagate changes on Path1 to Path2, and vice-versa.

Bisync is **in beta** and is considered an **advanced command**, so use with care.
Make sure you have read and understood the entire [manual](https://rclone.org/bisync)
(especially the [Limitations](https://rclone.org/bisync/#limitations) section) before using,
or data loss can result. Questions can be asked in the [Rclone Forum](https://forum.rclone.org/).

See [full bisync description](https://rclone.org/bisync/) for details.
`)


@@ -5,23 +5,18 @@ package bisync
import (
	"bufio"
	"context"
	"errors"
	"fmt"
	"io"
	"os"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/rclone/rclone/cmd/bisync/bilib"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/filter"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/operations"
	"golang.org/x/exp/slices"
	"github.com/rclone/rclone/fs/walk"
)

// ListingHeader defines first line of a listing

@@ -37,7 +32,7 @@ const ListingHeader = "# bisync listing v1 from"
// id: "-" (reserved)
const lineFormat = "%s %8d %s %s %s %q\n"

var lineRegex = regexp.MustCompile(`^(\S) +(-?\d+) (\S+) (\S+) (\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d\.\d{9}[+-]\d{4}) (".+")$`)
var lineRegex = regexp.MustCompile(`^(\S) +(\d+) (\S+) (\S+) (\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d\.\d{9}[+-]\d{4}) (".+")$`)

// timeFormat defines time format used in listings
const timeFormat = "2006-01-02T15:04:05.000000000-0700"
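
// Editor's sketch (hypothetical values, not part of the diff): a listing line
// as written by lineFormat and matched by lineRegex above -- flags, size,
// hash, id, a timestamp in timeFormat, then the quoted path:
//
//	-     1234 md5:d41d8cd98f00b204e9800998ecf8427e - 2024-01-02T15:04:05.000000000-0700 "subdir/file1.txt"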

@@ -70,73 +65,27 @@ func newFileList() *fileList {
}

func (ls *fileList) empty() bool {
	if ls == nil {
		return true
	}
	return len(ls.list) == 0
}

func (ls *fileList) has(file string) bool {
	if file == "" {
		fs.Debugf(nil, "called ls.has() with blank string")
		return false
	}
	_, found := ls.info[file]
	if !found {
		// try unquoting
		file, _ = strconv.Unquote(`"` + file + `"`)
		_, found = ls.info[file]
	}
	return found
}

func (ls *fileList) get(file string) *fileInfo {
	info, found := ls.info[file]
	if !found {
		// try unquoting
		file, _ = strconv.Unquote(`"` + file + `"`)
		info = ls.info[fmt.Sprint(file)]
	}
	return info
	return ls.info[file]
}

// copy file from ls to dest
func (ls *fileList) getPut(file string, dest *fileList) {
	f := ls.get(file)
	dest.put(file, f.size, f.time, f.hash, f.id, f.flags)
}

func (ls *fileList) getPutAll(dest *fileList) {
	for file, f := range ls.info {
		dest.put(file, f.size, f.time, f.hash, f.id, f.flags)
	}
}

func (ls *fileList) remove(file string) {
	if ls.has(file) {
		ls.list = slices.Delete(ls.list, slices.Index(ls.list, file), slices.Index(ls.list, file)+1)
		delete(ls.info, file)
	}
}

func (ls *fileList) put(file string, size int64, modtime time.Time, hash, id string, flags string) {
func (ls *fileList) put(file string, size int64, time time.Time, hash, id string, flags string) {
	fi := ls.get(file)
	if fi != nil {
		fi.size = size
		// if already have higher precision of same time, avoid overwriting it
		if fi.time != modtime {
			if modtime.Before(fi.time) && fi.time.Sub(modtime) < time.Second {
				modtime = fi.time
			}
		}
		fi.time = modtime
		fi.hash = hash
		fi.id = id
		fi.flags = flags
		fi.time = time
	} else {
		fi = &fileInfo{
			size: size,
			time: modtime,
			time: time,
			hash: hash,
			id:   id,
			flags: flags,
@@ -146,15 +95,6 @@ func (ls *fileList) put(file string, size int64, modtime time.Time, hash, id str
	}
}

func (ls *fileList) getTryAlias(file, alias string) string {
	if ls.has(file) {
		return file
	} else if ls.has(alias) {
		return alias
	}
	return ""
}

func (ls *fileList) getTime(file string) time.Time {
	fi := ls.get(file)
	if fi == nil {
@@ -163,59 +103,6 @@ func (ls *fileList) getTime(file string) time.Time {
	return fi.time
}

func (ls *fileList) getSize(file string) int64 {
	fi := ls.get(file)
	if fi == nil {
		return 0
	}
	return fi.size
}

func (ls *fileList) getHash(file string) string {
	fi := ls.get(file)
	if fi == nil {
		return ""
	}
	return fi.hash
}

func (b *bisyncRun) fileInfoEqual(file1, file2 string, ls1, ls2 *fileList) bool {
	equal := true
	if ls1.isDir(file1) && ls2.isDir(file2) {
		return equal
	}
	if b.opt.Compare.Size {
		if sizeDiffers(ls1.getSize(file1), ls2.getSize(file2)) {
			b.indent("ERROR", file1, fmt.Sprintf("Size not equal in listing. Path1: %v, Path2: %v", ls1.getSize(file1), ls2.getSize(file2)))
			equal = false
		}
	}
	if b.opt.Compare.Modtime {
		if timeDiffers(b.fctx, ls1.getTime(file1), ls2.getTime(file2), b.fs1, b.fs2) {
			b.indent("ERROR", file1, fmt.Sprintf("Modtime not equal in listing. Path1: %v, Path2: %v", ls1.getTime(file1), ls2.getTime(file2)))
			equal = false
		}
	}
	if b.opt.Compare.Checksum && !ignoreListingChecksum {
		if hashDiffers(ls1.getHash(file1), ls2.getHash(file2), b.opt.Compare.HashType1, b.opt.Compare.HashType2, ls1.getSize(file1), ls2.getSize(file2)) {
			b.indent("ERROR", file1, fmt.Sprintf("Checksum not equal in listing. Path1: %v, Path2: %v", ls1.getHash(file1), ls2.getHash(file2)))
			equal = false
		}
	}
	return equal
}

// also returns false if not found
func (ls *fileList) isDir(file string) bool {
	fi := ls.get(file)
	if fi != nil {
		if fi.flags == "d" {
			return true
		}
	}
	return false
}

func (ls *fileList) beforeOther(other *fileList, file string) bool {
	thisTime := ls.getTime(file)
	thatTime := other.getTime(file)
@@ -233,20 +120,12 @@ func (ls *fileList) afterTime(file string, time time.Time) bool {
	return fi.time.After(time)
}

// sort by path name
func (ls *fileList) sort() {
	sort.SliceStable(ls.list, func(i, j int) bool {
		return ls.list[i] < ls.list[j]
	})
}

// save will save listing to a file.
func (ls *fileList) save(ctx context.Context, listing string) error {
	file, err := os.Create(listing)
	if err != nil {
		return err
	}
	ls.sort()

	hashName := ""
	if ls.hash != hash.None {
@@ -293,6 +172,7 @@ func (ls *fileList) save(ctx context.Context, listing string) error {

// loadListing will load listing from a file.
// The key is the path to the file relative to the Path1/Path2 base.
// File size of -1, as for Google Docs, prints a warning and won't be loaded.
func (b *bisyncRun) loadListing(listing string) (*fileList, error) {
	file, err := os.Open(listing)
	if err != nil {
@@ -361,24 +241,6 @@ func (b *bisyncRun) loadListing(listing string) (*fileList, error) {
	return ls, nil
}

// saveOldListings saves the most recent successful listing, in case we need to rollback on error
func (b *bisyncRun) saveOldListings() {
	b.handleErr(b.listing1, "error saving old Path1 listing", bilib.CopyFileIfExists(b.listing1, b.listing1+"-old"), true, true)
	b.handleErr(b.listing2, "error saving old Path2 listing", bilib.CopyFileIfExists(b.listing2, b.listing2+"-old"), true, true)
}

// replaceCurrentListings saves both ".lst-new" listings as ".lst"
func (b *bisyncRun) replaceCurrentListings() {
	b.handleErr(b.newListing1, "error replacing Path1 listing", bilib.CopyFileIfExists(b.newListing1, b.listing1), true, true)
	b.handleErr(b.newListing2, "error replacing Path2 listing", bilib.CopyFileIfExists(b.newListing2, b.listing2), true, true)
}

// revertToOldListings reverts to the most recent successful listing
func (b *bisyncRun) revertToOldListings() {
	b.handleErr(b.listing1, "error reverting to old Path1 listing", bilib.CopyFileIfExists(b.listing1+"-old", b.listing1), true, true)
	b.handleErr(b.listing2, "error reverting to old Path2 listing", bilib.CopyFileIfExists(b.listing2+"-old", b.listing2), true, true)
}

func parseHash(str string) (string, string, error) {
	if str == "-" {
		return "", "", nil
@@ -392,6 +254,71 @@ func parseHash(str string) (string, string, error) {
	return "", "", fmt.Errorf("invalid hash %q", str)
}

// makeListing will produce listing from directory tree and write it to a file
func (b *bisyncRun) makeListing(ctx context.Context, f fs.Fs, listing string) (ls *fileList, err error) {
	ci := fs.GetConfig(ctx)
	depth := ci.MaxDepth
	hashType := hash.None
	if !b.opt.IgnoreListingChecksum {
		// Currently bisync just honors --ignore-listing-checksum
		// (note that this is different from --ignore-checksum)
		// TODO add full support for checksums and related flags
		hashType = f.Hashes().GetOne()
	}
	ls = newFileList()
	ls.hash = hashType
	var lock sync.Mutex
	listType := walk.ListObjects
	if b.opt.CreateEmptySrcDirs {
		listType = walk.ListAll
	}
	err = walk.ListR(ctx, f, "", false, depth, listType, func(entries fs.DirEntries) error {
		var firstErr error
		entries.ForObject(func(o fs.Object) {
			//tr := accounting.Stats(ctx).NewCheckingTransfer(o) // TODO
			var (
				hashVal string
				hashErr error
			)
			if hashType != hash.None {
				hashVal, hashErr = o.Hash(ctx, hashType)
				if firstErr == nil {
					firstErr = hashErr
				}
			}
			time := o.ModTime(ctx).In(TZ)
			id := ""     // TODO
			flags := "-" // "-" for a file and "d" for a directory
			lock.Lock()
			ls.put(o.Remote(), o.Size(), time, hashVal, id, flags)
			lock.Unlock()
			//tr.Done(ctx, nil) // TODO
		})
		if b.opt.CreateEmptySrcDirs {
			entries.ForDir(func(o fs.Directory) {
				var (
					hashVal string
				)
				time := o.ModTime(ctx).In(TZ)
				id := ""     // TODO
				flags := "d" // "-" for a file and "d" for a directory
				lock.Lock()
				// record size as 0 instead of -1, so bisync doesn't think it's a google doc
				ls.put(o.Remote(), 0, time, hashVal, id, flags)
				lock.Unlock()
			})
		}
		return firstErr
	})
	if err == nil {
		err = ls.save(ctx, listing)
	}
	if err != nil {
		b.abort = true
	}
	return
}
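
// Editor's note (sketch, not part of the diff): the walk.ListR callback may
// be invoked concurrently, which is presumably why ls.put() above is guarded
// by a sync.Mutex. The same guard pattern in isolation:
//
//	var mu sync.Mutex
//	add := func(name string, size int64) {
//		mu.Lock()
//		defer mu.Unlock()
//		ls.put(name, size, time.Now(), "", "", "-")
//	}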

// checkListing verifies that listing is not empty (unless resynching)
func (b *bisyncRun) checkListing(ls *fileList, listing, msg string) error {
	if b.opt.Resync || !ls.empty() {
@@ -449,439 +376,3 @@ func (b *bisyncRun) listDirsOnly(listingNum int) (*fileList, error) {

	return dirsonly, err
}

// ConvertPrecision returns the Modtime rounded to Dest's precision if lower, otherwise unchanged
// Need to use the other fs's precision (if lower) when copying
// Note: we need to use Truncate rather than Round so that After() is reliable.
// (2023-11-02 20:22:45.552679442 +0000 < UTC 2023-11-02 20:22:45.553 +0000 UTC)
func ConvertPrecision(Modtime time.Time, dst fs.Fs) time.Time {
	DestPrecision := dst.Precision()

	// In case it's wrapping an Fs with lower precision, try unwrapping and use the lowest.
	if Modtime.Truncate(DestPrecision).After(Modtime.Truncate(fs.UnWrapFs(dst).Precision())) {
		DestPrecision = fs.UnWrapFs(dst).Precision()
	}

	if Modtime.After(Modtime.Truncate(DestPrecision)) {
		return Modtime.Truncate(DestPrecision)
	}
	return Modtime
}
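
// Editor's sketch (not part of the diff): truncating to the coarser
// precision keeps After() reliable, per the comment above. With a
// millisecond-precision destination:
//
//	t := time.Date(2023, 11, 2, 20, 22, 45, 552679442, time.UTC)
//	t.Truncate(time.Millisecond) // 2023-11-02 20:22:45.552 +0000 UTC
//	// 45.552679442 truncates down to 45.552, never rounds up to 45.553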

// modifyListing will modify the listing based on the results of the sync
func (b *bisyncRun) modifyListing(ctx context.Context, src fs.Fs, dst fs.Fs, results []Results, queues queues, is1to2 bool) (err error) {
	queue := queues.copy2to1
	direction := "2to1"
	if is1to2 {
		queue = queues.copy1to2
		direction = "1to2"
	}

	fs.Debugf(nil, "updating %s", direction)
	prettyprint(results, "results", fs.LogLevelDebug)
	prettyprint(queue, "queue", fs.LogLevelDebug)

	srcListing, dstListing := b.getListingNames(is1to2)
	srcList, err := b.loadListing(srcListing)
	if err != nil {
		return fmt.Errorf("cannot read prior listing: %w", err)
	}
	dstList, err := b.loadListing(dstListing)
	if err != nil {
		return fmt.Errorf("cannot read prior listing: %w", err)
	}
	// set list hash type
	if b.opt.Resync && !b.opt.IgnoreListingChecksum {
		if is1to2 {
			srcList.hash = b.opt.Compare.HashType1
			dstList.hash = b.opt.Compare.HashType2
		} else {
			srcList.hash = b.opt.Compare.HashType2
			dstList.hash = b.opt.Compare.HashType1
		}
		if b.opt.Compare.DownloadHash && srcList.hash == hash.None {
			srcList.hash = hash.MD5
		}
		if b.opt.Compare.DownloadHash && dstList.hash == hash.None {
			dstList.hash = hash.MD5
		}
	}

	b.debugFn(b.DebugName, func() {
		var rs ResultsSlice = results
		b.debug(b.DebugName, fmt.Sprintf("modifyListing direction: %s, results has name?: %v", direction, rs.has(b.DebugName)))
		b.debug(b.DebugName, fmt.Sprintf("modifyListing direction: %s, srcList has name?: %v, dstList has name?: %v", direction, srcList.has(b.DebugName), dstList.has(b.DebugName)))
	})

	srcWinners := newFileList()
	dstWinners := newFileList()
	errors := newFileList()
	ctxRecheck, filterRecheck := filter.AddConfig(ctx)

	for _, result := range results {
		if result.Name == "" {
			continue
		}

		if result.AltName != "" {
			b.aliases.Add(result.Name, result.AltName)
		}

		if result.Flags == "d" && !b.opt.CreateEmptySrcDirs {
			continue
		}

		// build src winners list
		if result.IsSrc && result.Src != "" && (result.Winner.Err == nil || result.Flags == "d") {
			srcWinners.put(result.Name, result.Size, ConvertPrecision(result.Modtime, src), result.Hash, "-", result.Flags)
			prettyprint(result, "winner: copy to src", fs.LogLevelDebug)
		}

		// build dst winners list
		if result.IsWinner && result.Winner.Side != "none" && (result.Winner.Err == nil || result.Flags == "d") {
			dstWinners.put(result.Name, result.Size, ConvertPrecision(result.Modtime, dst), result.Hash, "-", result.Flags)
			prettyprint(result, "winner: copy to dst", fs.LogLevelDebug)
		}

		// build errors list
		if result.Err != nil || result.Winner.Err != nil {
			errors.put(result.Name, result.Size, result.Modtime, result.Hash, "-", result.Flags)
			if err := filterRecheck.AddFile(result.Name); err != nil {
				fs.Debugf(result.Name, "error adding file to recheck filter: %v", err)
			}
		}
	}

	ci := fs.GetConfig(ctx)
	updateLists := func(side string, winners, list *fileList) {
		for _, queueFile := range queue.ToList() {
			if !winners.has(queueFile) && list.has(queueFile) && !errors.has(queueFile) {
				// removals from side
				list.remove(queueFile)
				fs.Debugf(nil, "decision: removed from %s: %v", side, queueFile)
			} else if winners.has(queueFile) {
				// copies to side
				new := winners.get(queueFile)

				// handle normalization
				if side == "dst" {
					alias := b.aliases.Alias(queueFile)
					if alias != queueFile {
						// use the (non-identical) existing name, unless --fix-case
						if ci.FixCase {
							fs.Debugf(direction, "removing %s and adding %s as --fix-case was specified", alias, queueFile)
							list.remove(alias)
						} else {
							fs.Debugf(direction, "casing/unicode difference detected. using %s instead of %s", alias, queueFile)
							queueFile = alias
						}
					}
				}

				list.put(queueFile, new.size, new.time, new.hash, new.id, new.flags)
				fs.Debugf(nil, "decision: copied to %s: %v", side, queueFile)
			} else {
				fs.Debugf(queueFile, "file in queue but missing from %s transfers", side)
				if err := filterRecheck.AddFile(queueFile); err != nil {
					fs.Debugf(queueFile, "error adding file to recheck filter: %v", err)
				}
			}
		}
	}
	updateLists("src", srcWinners, srcList)
	updateLists("dst", dstWinners, dstList)

	// account for "deltaOthers" we handled separately
	if queues.deletedonboth.NotEmpty() {
		for file := range queues.deletedonboth {
			srcList.remove(file)
			dstList.remove(file)
		}
	}
	if b.renames.NotEmpty() && !b.opt.DryRun {
		// renamed on src and copied to dst
		for _, rename := range b.renames {
			srcOldName, srcNewName, dstOldName, dstNewName := rename.getNames(is1to2)
			fs.Debugf(nil, "%s: srcOldName: %v srcNewName: %v dstOldName: %v dstNewName: %v", direction, srcOldName, srcNewName, dstOldName, dstNewName)
			// we'll handle the other side when we go the other direction
			var new *fileInfo
			// we prefer to get the info from the newNamed versions
			// since they were actually copied as opposed to operations.MoveFile()'d.
			// the size/time/hash info is therefore fresher on the renames
			// but we'll settle for the original if we have to.
			if srcList.has(srcNewName) {
				new = srcList.get(srcNewName)
			} else if srcList.has(dstNewName) {
				new = srcList.get(dstNewName)
			} else if srcList.has(srcOldName) {
				new = srcList.get(srcOldName)
			} else {
				// something's odd, so let's recheck
				if err := filterRecheck.AddFile(srcOldName); err != nil {
					fs.Debugf(srcOldName, "error adding file to recheck filter: %v", err)
				}
			}
			if srcNewName != "" { // if it was renamed and not deleted
				srcList.put(srcNewName, new.size, new.time, new.hash, new.id, new.flags)
				dstList.put(srcNewName, new.size, ConvertPrecision(new.time, src), new.hash, new.id, new.flags)
			}
			if srcNewName != srcOldName {
				srcList.remove(srcOldName)
			}
			if srcNewName != dstOldName {
				dstList.remove(dstOldName)
			}
		}
	}

	// recheck the ones we skipped because they were equal
	// we never got their info because they were never synced.
	// TODO: add flag to skip this? (since it re-lists)
	if queues.renameSkipped.NotEmpty() {
		skippedList := queues.renameSkipped.ToList()
		for _, file := range skippedList {
			if err := filterRecheck.AddFile(file); err != nil {
				fs.Debugf(file, "error adding file to recheck filter: %v", err)
			}
		}
	}
	// skipped dirs -- nothing to recheck, just add them
	// (they are not necessarily there already, if they are new)
	path1List := srcList
	path2List := dstList
	if !is1to2 {
		path1List = dstList
		path2List = srcList
	}
	if !queues.skippedDirs1.empty() {
		queues.skippedDirs1.getPutAll(path1List)
	}
	if !queues.skippedDirs2.empty() {
		queues.skippedDirs2.getPutAll(path2List)
	}

	if filterRecheck.HaveFilesFrom() {
		// also include any aliases
		recheckFiles := filterRecheck.Files()
		for recheckFile := range recheckFiles {
			alias := b.aliases.Alias(recheckFile)
			if recheckFile != alias {
				if err := filterRecheck.AddFile(alias); err != nil {
					fs.Debugf(alias, "error adding file to recheck filter: %v", err)
				}
			}
		}
		b.recheck(ctxRecheck, src, dst, srcList, dstList, is1to2)
	}

	if b.InGracefulShutdown {
		var toKeep []string
		var toRollback []string
		fs.Debugf(direction, "stats for %s", direction)
		trs := accounting.Stats(ctx).Transferred()
		for _, tr := range trs {
			b.debugFn(tr.Name, func() {
				prettyprint(tr, tr.Name, fs.LogLevelInfo)
			})
			if tr.Error == nil && tr.Bytes > 0 || tr.Size <= 0 {
				prettyprint(tr, "keeping: "+tr.Name, fs.LogLevelDebug)
				toKeep = append(toKeep, tr.Name)
			}
		}
		// Dirs (for the unlikely event that the shutdown was triggered post-sync during syncEmptyDirs)
		for _, r := range results {
			if r.Origin == "syncEmptyDirs" {
				if srcWinners.has(r.Name) || dstWinners.has(r.Name) {
					toKeep = append(toKeep, r.Name)
					fs.Infof(r.Name, "keeping empty dir")
				}
			}
		}
		oldSrc, oldDst := b.getOldLists(is1to2)
		prettyprint(oldSrc.list, "oldSrc", fs.LogLevelDebug)
		prettyprint(oldDst.list, "oldDst", fs.LogLevelDebug)
		prettyprint(srcList.list, "srcList", fs.LogLevelDebug)
		prettyprint(dstList.list, "dstList", fs.LogLevelDebug)
		combinedList := Concat(oldSrc.list, oldDst.list, srcList.list, dstList.list)
		for _, f := range combinedList {
			if !slices.Contains(toKeep, f) && !slices.Contains(toKeep, b.aliases.Alias(f)) && !b.opt.DryRun {
				toRollback = append(toRollback, f)
			}
		}
		b.prepareRollback(toRollback, srcList, dstList, is1to2)
		prettyprint(oldSrc.list, "oldSrc", fs.LogLevelDebug)
		prettyprint(oldDst.list, "oldDst", fs.LogLevelDebug)
		prettyprint(srcList.list, "srcList", fs.LogLevelDebug)
		prettyprint(dstList.list, "dstList", fs.LogLevelDebug)

		// clear stats so we only do this once
		accounting.MaxCompletedTransfers = 0
		accounting.Stats(ctx).PruneTransfers()
	}

	if b.DebugName != "" {
		b.debug(b.DebugName, fmt.Sprintf("%s pre-save srcList has it?: %v", direction, srcList.has(b.DebugName)))
		b.debug(b.DebugName, fmt.Sprintf("%s pre-save dstList has it?: %v", direction, dstList.has(b.DebugName)))
	}
	// update files
	err = srcList.save(ctx, srcListing)
	b.handleErr(srcList, "error saving srcList from modifyListing", err, true, true)
	err = dstList.save(ctx, dstListing)
	b.handleErr(dstList, "error saving dstList from modifyListing", err, true, true)

	return err
}

// recheck the ones we're not sure about
func (b *bisyncRun) recheck(ctxRecheck context.Context, src, dst fs.Fs, srcList, dstList *fileList, is1to2 bool) {
	var srcObjs []fs.Object
	var dstObjs []fs.Object
	var resolved []string
	var toRollback []string

	if err := operations.ListFn(ctxRecheck, src, func(obj fs.Object) {
		srcObjs = append(srcObjs, obj)
	}); err != nil {
		fs.Debugf(src, "error rechecking src obj: %v", err)
	}
	if err := operations.ListFn(ctxRecheck, dst, func(obj fs.Object) {
		dstObjs = append(dstObjs, obj)
	}); err != nil {
		fs.Debugf(dst, "error rechecking dst obj: %v", err)
	}

	putObj := func(obj fs.Object, list *fileList) {
		hashVal := ""
		if !b.opt.IgnoreListingChecksum {
			hashType := list.hash
			if hashType != hash.None {
				hashVal, _ = obj.Hash(ctxRecheck, hashType)
			}
			hashVal, _ = tryDownloadHash(ctxRecheck, obj, hashVal)
		}
		var modtime time.Time
		if b.opt.Compare.Modtime {
			modtime = obj.ModTime(ctxRecheck).In(TZ)
		}
		list.put(obj.Remote(), obj.Size(), modtime, hashVal, "-", "-")
	}

	for _, srcObj := range srcObjs {
		fs.Debugf(srcObj, "rechecking")
		for _, dstObj := range dstObjs {
			if srcObj.Remote() == dstObj.Remote() || srcObj.Remote() == b.aliases.Alias(dstObj.Remote()) {
				// note: unlike Equal(), WhichEqual() does not update the modtime in dest if sums match but modtimes don't.
				if b.opt.DryRun || WhichEqual(ctxRecheck, srcObj, dstObj, src, dst) {
					putObj(srcObj, srcList)
					putObj(dstObj, dstList)
					resolved = append(resolved, srcObj.Remote())
				} else {
					fs.Infof(srcObj, "files not equal on recheck: %v %v", srcObj, dstObj)
				}
			}
		}
		// if srcObj not resolved by now (either because no dstObj match or files not equal),
		// roll it back to old version, so it gets retried next time.
		// skip and error during --resync, as rollback is not possible
		if !slices.Contains(resolved, srcObj.Remote()) && !b.opt.DryRun {
			if b.opt.Resync {
				err := errors.New("no dstObj match or files not equal")
				b.handleErr(srcObj, "Unable to rollback during --resync", err, true, false)
			} else {
				toRollback = append(toRollback, srcObj.Remote())
			}
		}
	}
	if len(toRollback) > 0 {
		srcListing, dstListing := b.getListingNames(is1to2)
		oldSrc, err := b.loadListing(srcListing + "-old")
		b.handleErr(oldSrc, "error loading old src listing", err, true, true)
		oldDst, err := b.loadListing(dstListing + "-old")
		b.handleErr(oldDst, "error loading old dst listing", err, true, true)
		if b.critical {
			return
		}

		for _, item := range toRollback {
			b.rollback(item, oldSrc, srcList)
			b.rollback(item, oldDst, dstList)
		}
	}
}

func (b *bisyncRun) getListingNames(is1to2 bool) (srcListing string, dstListing string) {
	if is1to2 {
		return b.listing1, b.listing2
	}
	return b.listing2, b.listing1
}

func (b *bisyncRun) rollback(item string, oldList, newList *fileList) {
	alias := b.aliases.Alias(item)
	if oldList.has(item) {
		oldList.getPut(item, newList)
		fs.Debugf(nil, "adding to newlist: %s", item)
	} else if oldList.has(alias) {
		oldList.getPut(alias, newList)
		fs.Debugf(nil, "adding to newlist: %s", alias)
	} else {
		fs.Debugf(nil, "removing from newlist: %s (has it?: %v)", item, newList.has(item))
		prettyprint(newList.list, "newList", fs.LogLevelDebug)
		newList.remove(item)
		newList.remove(alias)
	}
}

func (b *bisyncRun) prepareRollback(toRollback []string, srcList, dstList *fileList, is1to2 bool) {
	if len(toRollback) > 0 {
		oldSrc, oldDst := b.getOldLists(is1to2)
		if b.critical {
			return
		}

		fs.Debugf("new lists", "src: (%v), dest: (%v)", len(srcList.list), len(dstList.list))

		for _, item := range toRollback {
			b.debugFn(item, func() {
				b.debug(item, fmt.Sprintf("pre-rollback oldSrc has it?: %v", oldSrc.has(item)))
				b.debug(item, fmt.Sprintf("pre-rollback oldDst has it?: %v", oldDst.has(item)))
				b.debug(item, fmt.Sprintf("pre-rollback srcList has it?: %v", srcList.has(item)))
				b.debug(item, fmt.Sprintf("pre-rollback dstList has it?: %v", dstList.has(item)))
			})
			b.rollback(item, oldSrc, srcList)
			b.rollback(item, oldDst, dstList)
			b.debugFn(item, func() {
				b.debug(item, fmt.Sprintf("post-rollback oldSrc has it?: %v", oldSrc.has(item)))
				b.debug(item, fmt.Sprintf("post-rollback oldDst has it?: %v", oldDst.has(item)))
				b.debug(item, fmt.Sprintf("post-rollback srcList has it?: %v", srcList.has(item)))
				b.debug(item, fmt.Sprintf("post-rollback dstList has it?: %v", dstList.has(item)))
			})
		}
	}
}

func (b *bisyncRun) getOldLists(is1to2 bool) (*fileList, *fileList) {
	srcListing, dstListing := b.getListingNames(is1to2)
	oldSrc, err := b.loadListing(srcListing + "-old")
	b.handleErr(oldSrc, "error loading old src listing", err, true, true)
	oldDst, err := b.loadListing(dstListing + "-old")
	b.handleErr(oldDst, "error loading old dst listing", err, true, true)
	fs.Debugf("get old lists", "is1to2: %v, oldsrc: %s (%v), olddest: %s (%v)", is1to2, srcListing+"-old", len(oldSrc.list), dstListing+"-old", len(oldDst.list))
	return oldSrc, oldDst
}

// Concat returns a new slice concatenating the passed in slices.
func Concat[S ~[]E, E any](ss ...S) S {
	size := 0
	for _, s := range ss {
		size += len(s)
		if size < 0 {
			panic("len out of range")
		}
	}
	newslice := slices.Grow[S](nil, size)
	for _, s := range ss {
		newslice = append(newslice, s...)
	}
	return newslice
}
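
// Editor's note (sketch, not part of the diff): this mirrors the
// slices.Concat that later shipped in Go 1.22; bisync uses it above to merge
// the old and new listings into one combined list:
//
//	combined := Concat([]string{"a"}, []string{"b", "c"}) // ["a", "b", "c"]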


@@ -1,154 +0,0 @@
package bisync

import (
	"encoding/json"
	"fmt"
	"io"
	"os"
	"strconv"
	"sync"
	"time"

	"github.com/rclone/rclone/cmd/bisync/bilib"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/terminal"
)

const basicallyforever = 200 * 365 * 24 * time.Hour

var stopRenewal func()

var data = struct {
	Session     string
	PID         string
	TimeRenewed time.Time
	TimeExpires time.Time
}{}
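
// Editor's sketch (hypothetical values, not part of the diff): the lock file
// holds the JSON encoding of the data struct above, e.g.:
//
//	{"Session":"/home/user/.cache/rclone/bisync/path1..path2","PID":"12345",
//	 "TimeRenewed":"2024-01-01T12:00:00Z","TimeExpires":"2024-01-01T12:05:00Z"}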

func (b *bisyncRun) setLockFile() error {
	b.lockFile = ""
	b.setLockFileExpiration()
	if !b.opt.DryRun {
		b.lockFile = b.basePath + ".lck"
		if bilib.FileExists(b.lockFile) {
			if !b.lockFileIsExpired() {
				errTip := Color(terminal.MagentaFg, "Tip: this indicates that another bisync run (of these same paths) either is still running or was interrupted before completion. \n")
				errTip += Color(terminal.MagentaFg, "If you're SURE you want to override this safety feature, you can delete the lock file with the following command, then run bisync again: \n")
				errTip += fmt.Sprintf(Color(terminal.HiRedFg, "rclone deletefile \"%s\""), b.lockFile)
				return fmt.Errorf(Color(terminal.RedFg, "prior lock file found: %s \n")+errTip, Color(terminal.HiYellowFg, b.lockFile))
			}
		}

		pidStr := []byte(strconv.Itoa(os.Getpid()))
		if err = os.WriteFile(b.lockFile, pidStr, bilib.PermSecure); err != nil {
			return fmt.Errorf(Color(terminal.RedFg, "cannot create lock file: %s: %w"), b.lockFile, err)
		}
		fs.Debugf(nil, "Lock file created: %s", b.lockFile)
		b.renewLockFile()
		stopRenewal = b.startLockRenewal()
	}
	return nil
}

func (b *bisyncRun) removeLockFile() {
	if b.lockFile != "" {
		stopRenewal()
		errUnlock := os.Remove(b.lockFile)
		if errUnlock == nil {
			fs.Debugf(nil, "Lock file removed: %s", b.lockFile)
		} else if err == nil {
			err = errUnlock
		} else {
			fs.Errorf(nil, "cannot remove lockfile %s: %v", b.lockFile, errUnlock)
		}
		b.lockFile = "" // block removing it again
	}
}

func (b *bisyncRun) setLockFileExpiration() {
	if b.opt.MaxLock > 0 && b.opt.MaxLock < 2*time.Minute {
		fs.Logf(nil, Color(terminal.YellowFg, "--max-lock cannot be shorter than 2 minutes (unless 0.) Changing --max-lock from %v to %v"), b.opt.MaxLock, 2*time.Minute)
		b.opt.MaxLock = 2 * time.Minute
	} else if b.opt.MaxLock <= 0 {
		b.opt.MaxLock = basicallyforever
	}
}

func (b *bisyncRun) renewLockFile() {
	if b.lockFile != "" && bilib.FileExists(b.lockFile) {

		data.Session = b.basePath
		data.PID = strconv.Itoa(os.Getpid())
		data.TimeRenewed = time.Now()
		data.TimeExpires = time.Now().Add(b.opt.MaxLock)

		// save data file
		df, err := os.Create(b.lockFile)
		b.handleErr(b.lockFile, "error renewing lock file", err, true, true)
		b.handleErr(b.lockFile, "error encoding JSON to lock file", json.NewEncoder(df).Encode(data), true, true)
		b.handleErr(b.lockFile, "error closing lock file", df.Close(), true, true)
		if b.opt.MaxLock < basicallyforever {
			fs.Infof(nil, Color(terminal.HiBlueFg, "lock file renewed for %v. New expiration: %v"), b.opt.MaxLock, data.TimeExpires)
		}
	}
}

func (b *bisyncRun) lockFileIsExpired() bool {
	if b.lockFile != "" && bilib.FileExists(b.lockFile) {
		rdf, err := os.Open(b.lockFile)
		b.handleErr(b.lockFile, "error reading lock file", err, true, true)
		dec := json.NewDecoder(rdf)
		for {
			if err := dec.Decode(&data); err == io.EOF {
				break
			}
		}
		b.handleErr(b.lockFile, "error closing file", rdf.Close(), true, true)
		if !data.TimeExpires.IsZero() && data.TimeExpires.Before(time.Now()) {
			fs.Infof(b.lockFile, Color(terminal.GreenFg, "Lock file found, but it expired at %v. Will delete it and proceed."), data.TimeExpires)
			markFailed(b.listing1) // listing is untrusted so force revert to prior (if --recover) or create new ones (if --resync)
			markFailed(b.listing2)
			return true
		}
		fs.Infof(b.lockFile, Color(terminal.RedFg, "Valid lock file found. Expires at %v. (%v from now)"), data.TimeExpires, time.Since(data.TimeExpires).Abs().Round(time.Second))
|
||||
prettyprint(data, "Lockfile info", fs.LogLevelInfo)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// StartLockRenewal renews the lockfile every --max-lock minus one minute.
|
||||
//
|
||||
// It returns a func which should be called to stop the renewal.
|
||||
func (b *bisyncRun) startLockRenewal() func() {
|
||||
if b.opt.MaxLock <= 0 || b.opt.MaxLock >= basicallyforever || b.lockFile == "" {
|
||||
return func() {}
|
||||
}
|
||||
stopLockRenewal := make(chan struct{})
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
ticker := time.NewTicker(b.opt.MaxLock - time.Minute)
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
b.renewLockFile()
|
||||
case <-stopLockRenewal:
|
||||
ticker.Stop()
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
return func() {
|
||||
close(stopLockRenewal)
|
||||
wg.Wait()
|
||||
}
|
||||
}
|
||||
|
||||
func markFailed(file string) {
|
||||
failFile := file + "-err"
|
||||
if bilib.FileExists(file) {
|
||||
_ = os.Remove(failFile)
|
||||
_ = os.Rename(file, failFile)
|
||||
}
|
||||
}
|
|
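startLockRenewal above follows a standard Go pattern: a ticker goroutine renews the lock just before it would expire (the interval is --max-lock minus one minute), and the returned func closes a channel and waits on a WaitGroup so the caller knows the goroutine has fully exited. A generic, self-contained sketch of the same pattern (names are illustrative, not rclone API):

package main

import (
	"fmt"
	"sync"
	"time"
)

// startRenewal calls renew on every tick until the returned stop func runs.
func startRenewal(interval time.Duration, renew func()) (stop func()) {
	done := make(chan struct{})
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		ticker := time.NewTicker(interval)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				renew()
			case <-done:
				return
			}
		}
	}()
	return func() {
		close(done)
		wg.Wait() // don't return until the goroutine has exited
	}
}

func main() {
	stop := startRenewal(50*time.Millisecond, func() { fmt.Println("renewed") })
	time.Sleep(120 * time.Millisecond)
	stop()
}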
@ -1,15 +1,12 @@
package bisync

import (
	"encoding/json"
	"fmt"
	"runtime"
	"strconv"
	"strings"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/terminal"
)

func (b *bisyncRun) indentf(tag, file, format string, args ...interface{}) {
@ -28,23 +25,12 @@ func (b *bisyncRun) indent(tag, file, msg string) {
		tag = tag[1:]
		logf = fs.Logf
	}

	if tag == "Path1" {
		tag = Color(terminal.CyanFg, "Path1")
	} else {
		tag = Color(terminal.BlueFg, tag)
	}
	msg = Color(terminal.MagentaFg, msg)
	msg = strings.Replace(msg, "Queue copy to", Color(terminal.GreenFg, "Queue copy to"), -1)
	msg = strings.Replace(msg, "Queue delete", Color(terminal.RedFg, "Queue delete"), -1)
	file = Color(terminal.CyanFg, escapePath(file, false))
	logf(nil, "- %-18s%-43s - %s", tag, msg, file)
	logf(nil, "- %-9s%-35s - %s", tag, msg, escapePath(file, false))
}

// escapePath will escape control characters in path.
// It won't quote just due to backslashes on Windows.
func escapePath(path string, forceQuotes bool) string {
	path = encode(path)
	test := path
	if runtime.GOOS == "windows" {
		test = strings.ReplaceAll(path, "\\", "/")
@ -61,31 +47,3 @@ func escapePath(path string, forceQuotes bool) string {
func quotePath(path string) string {
	return escapePath(path, true)
}

var Colors bool // Colors controls whether terminal colors are enabled

// Color handles terminal colors for bisync
func Color(style string, s string) string {
	if !Colors {
		return s
	}
	terminal.Start()
	return style + s + terminal.Reset
}

func encode(s string) string {
	return encoder.OS.ToStandardPath(encoder.OS.FromStandardPath(s))
}

// prettyprint formats JSON for improved readability in debug logs
func prettyprint(in any, label string, level fs.LogLevel) {
	inBytes, err := json.MarshalIndent(in, "", "\t")
	if err != nil {
		fs.Debugf(nil, "failed to marshal input: %v", err)
	}
	if level == fs.LogLevelDebug {
		fs.Debugf(nil, "%s: \n%s\n", label, string(inBytes))
	} else if level == fs.LogLevelInfo {
		fs.Infof(nil, "%s: \n%s\n", label, string(inBytes))
	}
}
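prettyprint is a thin wrapper over json.MarshalIndent routed to the chosen log level. The core call, shown standalone (the struct contents are a made-up example):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	in := struct {
		Session string
		PID     string
	}{"/tmp/bisync", "1234"}
	inBytes, err := json.MarshalIndent(in, "", "\t") // same call prettyprint makes
	if err != nil {
		fmt.Printf("failed to marshal input: %v\n", err)
		return
	}
	fmt.Printf("Lockfile info: \n%s\n", inBytes)
}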
@ -1,247 +0,0 @@
package bisync

import (
	"context"
	"sync"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/filter"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/march"
)

var ls1 = newFileList()
var ls2 = newFileList()
var err error
var firstErr error
var marchAliasLock sync.Mutex
var marchLsLock sync.Mutex
var marchErrLock sync.Mutex
var marchCtx context.Context

func (b *bisyncRun) makeMarchListing(ctx context.Context) (*fileList, *fileList, error) {
	ci := fs.GetConfig(ctx)
	marchCtx = ctx
	b.setupListing()
	fs.Debugf(b, "starting to march!")

	// set up a march over fdst (Path2) and fsrc (Path1)
	m := &march.March{
		Ctx:                    ctx,
		Fdst:                   b.fs2,
		Fsrc:                   b.fs1,
		Dir:                    "",
		NoTraverse:             false,
		Callback:               b,
		DstIncludeAll:          false,
		NoCheckDest:            false,
		NoUnicodeNormalization: ci.NoUnicodeNormalization,
	}
	err = m.Run(ctx)

	fs.Debugf(b, "march completed. err: %v", err)
	if err == nil {
		err = firstErr
	}
	if err != nil {
		b.handleErr("march", "error during march", err, true, true)
		b.abort = true
		return ls1, ls2, err
	}

	// save files
	if b.opt.Compare.DownloadHash && ls1.hash == hash.None {
		ls1.hash = hash.MD5
	}
	if b.opt.Compare.DownloadHash && ls2.hash == hash.None {
		ls2.hash = hash.MD5
	}
	err = ls1.save(ctx, b.newListing1)
	b.handleErr(ls1, "error saving ls1 from march", err, true, true)
	err = ls2.save(ctx, b.newListing2)
	b.handleErr(ls2, "error saving ls2 from march", err, true, true)

	return ls1, ls2, err
}

// SrcOnly have an object which is on path1 only
func (b *bisyncRun) SrcOnly(o fs.DirEntry) (recurse bool) {
	fs.Debugf(o, "path1 only")
	b.parse(o, true)
	return isDir(o)
}

// DstOnly have an object which is on path2 only
func (b *bisyncRun) DstOnly(o fs.DirEntry) (recurse bool) {
	fs.Debugf(o, "path2 only")
	b.parse(o, false)
	return isDir(o)
}

// Match is called when object exists on both path1 and path2 (whether equal or not)
func (b *bisyncRun) Match(ctx context.Context, o2, o1 fs.DirEntry) (recurse bool) {
	fs.Debugf(o1, "both path1 and path2")
	marchAliasLock.Lock()
	b.aliases.Add(o1.Remote(), o2.Remote())
	marchAliasLock.Unlock()
	b.parse(o1, true)
	b.parse(o2, false)
	return isDir(o1)
}

func isDir(e fs.DirEntry) bool {
	switch x := e.(type) {
	case fs.Object:
		fs.Debugf(x, "is Object")
		return false
	case fs.Directory:
		fs.Debugf(x, "is Dir")
		return true
	default:
		fs.Debugf(e, "is unknown")
	}
	return false
}

func (b *bisyncRun) parse(e fs.DirEntry, isPath1 bool) {
	switch x := e.(type) {
	case fs.Object:
		b.ForObject(x, isPath1)
	case fs.Directory:
		if b.opt.CreateEmptySrcDirs {
			b.ForDir(x, isPath1)
		}
	default:
		fs.Debugf(e, "is unknown")
	}
}

func (b *bisyncRun) setupListing() {
	ls1 = newFileList()
	ls2 = newFileList()

	// note that --ignore-listing-checksum is different from --ignore-checksum
	// and we already checked it when we set b.opt.Compare.HashType1 and 2
	ls1.hash = b.opt.Compare.HashType1
	ls2.hash = b.opt.Compare.HashType2
}

func (b *bisyncRun) ForObject(o fs.Object, isPath1 bool) {
	tr := accounting.Stats(marchCtx).NewCheckingTransfer(o, "listing file - "+whichPath(isPath1))
	defer func() {
		tr.Done(marchCtx, nil)
	}()
	var (
		hashVal string
		hashErr error
	)
	ls := whichLs(isPath1)
	hashType := ls.hash
	if hashType != hash.None {
		hashVal, hashErr = o.Hash(marchCtx, hashType)
		marchErrLock.Lock()
		if firstErr == nil {
			firstErr = hashErr
		}
		marchErrLock.Unlock()
	}
	hashVal, hashErr = tryDownloadHash(marchCtx, o, hashVal)
	marchErrLock.Lock()
	if firstErr == nil {
		firstErr = hashErr
	}
	if firstErr != nil {
		b.handleErr(hashType, "error hashing during march", firstErr, false, true)
	}
	marchErrLock.Unlock()

	var modtime time.Time
	if b.opt.Compare.Modtime {
		modtime = o.ModTime(marchCtx).In(TZ)
	}
	id := ""     // TODO: ID(o)
	flags := "-" // "-" for a file and "d" for a directory
	marchLsLock.Lock()
	ls.put(o.Remote(), o.Size(), modtime, hashVal, id, flags)
	marchLsLock.Unlock()
}

func (b *bisyncRun) ForDir(o fs.Directory, isPath1 bool) {
	tr := accounting.Stats(marchCtx).NewCheckingTransfer(o, "listing dir - "+whichPath(isPath1))
	defer func() {
		tr.Done(marchCtx, nil)
	}()
	ls := whichLs(isPath1)
	var modtime time.Time
	if b.opt.Compare.Modtime {
		modtime = o.ModTime(marchCtx).In(TZ)
	}
	id := ""     // TODO
	flags := "d" // "-" for a file and "d" for a directory
	marchLsLock.Lock()
	ls.put(o.Remote(), -1, modtime, "", id, flags)
	marchLsLock.Unlock()
}

func whichLs(isPath1 bool) *fileList {
	ls := ls1
	if !isPath1 {
		ls = ls2
	}
	return ls
}

func whichPath(isPath1 bool) string {
	s := "Path1"
	if !isPath1 {
		s = "Path2"
	}
	return s
}

func (b *bisyncRun) findCheckFiles(ctx context.Context) (*fileList, *fileList, error) {
	ctxCheckFile, filterCheckFile := filter.AddConfig(ctx)
	b.handleErr(b.opt.CheckFilename, "error adding CheckFilename to filter", filterCheckFile.Add(true, b.opt.CheckFilename), true, true)
	b.handleErr(b.opt.CheckFilename, "error adding ** exclusion to filter", filterCheckFile.Add(false, "**"), true, true)
	ci := fs.GetConfig(ctxCheckFile)
	marchCtx = ctxCheckFile

	b.setupListing()
	fs.Debugf(b, "starting to march!")

	// set up a march over fdst (Path2) and fsrc (Path1)
	m := &march.March{
		Ctx:                    ctxCheckFile,
		Fdst:                   b.fs2,
		Fsrc:                   b.fs1,
		Dir:                    "",
		NoTraverse:             false,
		Callback:               b,
		DstIncludeAll:          false,
		NoCheckDest:            false,
		NoUnicodeNormalization: ci.NoUnicodeNormalization,
	}
	err = m.Run(ctxCheckFile)

	fs.Debugf(b, "march completed. err: %v", err)
	if err == nil {
		err = firstErr
	}
	if err != nil {
		b.handleErr("march", "error during findCheckFiles", err, true, true)
		b.abort = true
	}

	return ls1, ls2, err
}

// ID returns the ID of the Object if known, or "" if not
func ID(o fs.Object) string {
	do, ok := o.(fs.IDer)
	if !ok {
		return ""
	}
	return do.ID()
}
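March invokes the SrcOnly/DstOnly/Match callbacks concurrently, which is why every access to the shared package-level state above is bracketed by marchAliasLock, marchLsLock, or marchErrLock. A reduced sketch of that lock discipline, with stand-in types rather than the rclone ones:

package main

import (
	"fmt"
	"sync"
)

// fileList guards its slice with a mutex because put is called from
// many walker goroutines at once, as in ForObject above.
type fileList struct {
	mu    sync.Mutex
	names []string
}

func (l *fileList) put(name string) {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.names = append(l.names, name)
}

func main() {
	l := &fileList{}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			l.put(fmt.Sprintf("file%d.txt", i))
		}(i)
	}
	wg.Wait()
	fmt.Println(len(l.names)) // 4
}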
@ -9,18 +9,15 @@ import (
	"fmt"
	"os"
	"path/filepath"
	"runtime"
	"strings"
	"strconv"
	gosync "sync"
	"time"

	"github.com/rclone/rclone/cmd/bisync/bilib"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/log"
	"github.com/rclone/rclone/fs/filter"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fs/sync"
	"github.com/rclone/rclone/lib/atexit"
	"github.com/rclone/rclone/lib/terminal"
)

// ErrBisyncAborted signals that bisync is aborted and forces exit code 2
@ -28,49 +25,23 @@ var ErrBisyncAborted = errors.New("bisync aborted")

// bisyncRun keeps bisync runtime state
type bisyncRun struct {
	fs1                fs.Fs
	fs2                fs.Fs
	abort              bool
	critical           bool
	retryable          bool
	basePath           string
	workDir            string
	listing1           string
	listing2           string
	newListing1        string
	newListing2        string
	aliases            bilib.AliasMap
	opt                *Options
	octx               context.Context
	fctx               context.Context
	InGracefulShutdown bool
	CleanupCompleted   bool
	SyncCI             *fs.ConfigInfo
	CancelSync         context.CancelFunc
	DebugName          string
	lockFile           string
	renames            renames
	resyncIs1to2       bool
}

type queues struct {
	copy1to2      bilib.Names
	copy2to1      bilib.Names
	renameSkipped bilib.Names // not renamed because it was equal
	skippedDirs1  *fileList
	skippedDirs2  *fileList
	deletedonboth bilib.Names
	fs1           fs.Fs
	fs2           fs.Fs
	abort         bool
	critical      bool
	retryable     bool
	basePath      string
	workDir       string
	opt           *Options
}

// Bisync handles lock file, performs bisync run and checks exit status
func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
	defer resetGlobals()
	opt := *optArg // ensure that input is never changed
	b := &bisyncRun{
		fs1:       fs1,
		fs2:       fs2,
		opt:       &opt,
		DebugName: opt.DebugName,
		fs1: fs1,
		fs2: fs2,
		opt: &opt,
	}

	if opt.CheckFilename == "" {
@ -79,23 +50,14 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
	if opt.Workdir == "" {
		opt.Workdir = DefaultWorkdir
	}
	ci := fs.GetConfig(ctx)
	opt.OrigBackupDir = ci.BackupDir

	if ci.TerminalColorMode == fs.TerminalColorModeAlways || (ci.TerminalColorMode == fs.TerminalColorModeAuto && !log.Redirected()) {
		Colors = true
	}

	err = b.setCompareDefaults(ctx)
	if err != nil {
		return err
	}

	b.setResyncDefaults()

	err = b.setResolveDefaults(ctx)
	if err != nil {
		return err
	if !opt.DryRun && !opt.Force {
		if fs1.Precision() == fs.ModTimeNotSupported {
			return errors.New("modification time support is missing on path1")
		}
		if fs2.Precision() == fs.ModTimeNotSupported {
			return errors.New("modification time support is missing on path2")
		}
	}

	if b.workDir, err = filepath.Abs(opt.Workdir); err != nil {
@ -106,62 +68,41 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
	}

	// Produce a unique name for the sync operation
	b.basePath = bilib.BasePath(ctx, b.workDir, b.fs1, b.fs2)
	b.listing1 = b.basePath + ".path1.lst"
	b.listing2 = b.basePath + ".path2.lst"
	b.newListing1 = b.listing1 + "-new"
	b.newListing2 = b.listing2 + "-new"
	b.aliases = bilib.AliasMap{}

	err = b.checkSyntax()
	if err != nil {
		return err
	}
	b.basePath = filepath.Join(b.workDir, bilib.SessionName(b.fs1, b.fs2))
	listing1 := b.basePath + ".path1.lst"
	listing2 := b.basePath + ".path2.lst"

	// Handle lock file
	err = b.setLockFile()
	if err != nil {
		return err
	lockFile := ""
	if !opt.DryRun {
		lockFile = b.basePath + ".lck"
		if bilib.FileExists(lockFile) {
			return fmt.Errorf("prior lock file found: %s", lockFile)
		}

		pidStr := []byte(strconv.Itoa(os.Getpid()))
		if err = os.WriteFile(lockFile, pidStr, bilib.PermSecure); err != nil {
			return fmt.Errorf("cannot create lock file: %s: %w", lockFile, err)
		}
		fs.Debugf(nil, "Lock file created: %s", lockFile)
	}

	// Handle SIGINT
	var finaliseOnce gosync.Once

	markFailed := func(file string) {
		failFile := file + "-err"
		if bilib.FileExists(file) {
			_ = os.Remove(failFile)
			_ = os.Rename(file, failFile)
		}
	}
	finalise := func() {
		finaliseOnce.Do(func() {
			if atexit.Signalled() {
				if b.opt.Resync {
					fs.Logf(nil, Color(terminal.GreenFg, "No need to gracefully shutdown during --resync (just run it again.)"))
				} else {
					fs.Logf(nil, Color(terminal.YellowFg, "Attempting to gracefully shutdown. (Send exit signal again for immediate un-graceful shutdown.)"))
					b.InGracefulShutdown = true
					if b.SyncCI != nil {
						fs.Infof(nil, Color(terminal.YellowFg, "Telling Sync to wrap up early."))
						b.SyncCI.MaxTransfer = 1
						b.SyncCI.MaxDuration = 1 * time.Second
						b.SyncCI.CutoffMode = fs.CutoffModeSoft
						gracePeriod := 30 * time.Second // TODO: flag to customize this?
						if !waitFor("Canceling Sync if not done in", gracePeriod, func() bool { return b.CleanupCompleted }) {
							fs.Logf(nil, Color(terminal.YellowFg, "Canceling sync and cleaning up"))
							b.CancelSync()
							waitFor("Aborting Bisync if not done in", 60*time.Second, func() bool { return b.CleanupCompleted })
						}
					} else {
						// we haven't started to sync yet, so we're good.
						// no need to worry about the listing files, as we haven't overwritten them yet.
						b.CleanupCompleted = true
						fs.Logf(nil, Color(terminal.GreenFg, "Graceful shutdown completed successfully."))
					}
				}
				if !b.CleanupCompleted {
					if !b.opt.Resync {
						fs.Logf(nil, Color(terminal.HiRedFg, "Graceful shutdown failed."))
						fs.Logf(nil, Color(terminal.RedFg, "Bisync interrupted. Must run --resync to recover."))
					}
					markFailed(b.listing1)
					markFailed(b.listing2)
				}
				b.removeLockFile()
				fs.Logf(nil, "Bisync interrupted. Must run --resync to recover.")
				markFailed(listing1)
				markFailed(listing2)
				_ = os.Remove(lockFile)
			}
		})
	}
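The finalise handler is wrapped in a gosync.Once so its cleanup runs exactly once even if it is reached from both the atexit signal path and normal teardown. In miniature:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var finaliseOnce sync.Once
	finalise := func() {
		finaliseOnce.Do(func() { fmt.Println("marking listings failed, removing lock file") })
	}
	finalise() // e.g. invoked from the signal handler...
	finalise() // ...and again at exit: the body runs only the first time
}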
@ -169,55 +110,53 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
	defer atexit.Unregister(fnHandle)

	// run bisync
	err = b.runLocked(ctx)
	err = b.runLocked(ctx, listing1, listing2)

	b.removeLockFile()

	b.CleanupCompleted = true
	if b.InGracefulShutdown {
		if err == context.Canceled || err == accounting.ErrorMaxTransferLimitReachedGraceful {
			err = nil
			b.critical = false
		}
		if err == nil {
			fs.Logf(nil, Color(terminal.GreenFg, "Graceful shutdown completed successfully."))
	if lockFile != "" {
		errUnlock := os.Remove(lockFile)
		if errUnlock == nil {
			fs.Debugf(nil, "Lock file removed: %s", lockFile)
		} else if err == nil {
			err = errUnlock
		} else {
			fs.Errorf(nil, "cannot remove lockfile %s: %v", lockFile, errUnlock)
		}
	}

	if b.critical {
		if b.retryable && b.opt.Resilient {
			fs.Errorf(nil, Color(terminal.RedFg, "Bisync critical error: %v"), err)
			fs.Errorf(nil, Color(terminal.YellowFg, "Bisync aborted. Error is retryable without --resync due to --resilient mode."))
			fs.Errorf(nil, "Bisync critical error: %v", err)
			fs.Errorf(nil, "Bisync aborted. Error is retryable without --resync due to --resilient mode.")
		} else {
			if bilib.FileExists(b.listing1) {
				_ = os.Rename(b.listing1, b.listing1+"-err")
			if bilib.FileExists(listing1) {
				_ = os.Rename(listing1, listing1+"-err")
			}
			if bilib.FileExists(b.listing2) {
				_ = os.Rename(b.listing2, b.listing2+"-err")
			if bilib.FileExists(listing2) {
				_ = os.Rename(listing2, listing2+"-err")
			}
			fs.Errorf(nil, Color(terminal.RedFg, "Bisync critical error: %v"), err)
			fs.Errorf(nil, Color(terminal.RedFg, "Bisync aborted. Must run --resync to recover."))
			fs.Errorf(nil, "Bisync critical error: %v", err)
			fs.Errorf(nil, "Bisync aborted. Must run --resync to recover.")
		}
		return ErrBisyncAborted
	}
	if b.abort && !b.InGracefulShutdown {
		fs.Logf(nil, Color(terminal.RedFg, "Bisync aborted. Please try again."))
	if b.abort {
		fs.Logf(nil, "Bisync aborted. Please try again.")
	}
	if err == nil {
		fs.Infof(nil, Color(terminal.GreenFg, "Bisync successful"))
		fs.Infof(nil, "Bisync successful")
	}
	return err
}

// runLocked performs a full bisync run
func (b *bisyncRun) runLocked(octx context.Context) (err error) {
func (b *bisyncRun) runLocked(octx context.Context, listing1, listing2 string) (err error) {
	opt := b.opt
	path1 := bilib.FsPath(b.fs1)
	path2 := bilib.FsPath(b.fs2)

	if opt.CheckSync == CheckSyncOnly {
		fs.Infof(nil, "Validating listings for Path1 %s vs Path2 %s", quotePath(path1), quotePath(path2))
		if err = b.checkSync(b.listing1, b.listing2); err != nil {
		if err = b.checkSync(listing1, listing2); err != nil {
			b.critical = true
			b.retryable = true
		}
|

	if opt.DryRun {
		// In --dry-run mode, preserve original listings and save updates to the .lst-dry files
		origListing1 := b.listing1
		origListing2 := b.listing2
		b.listing1 += "-dry"
		b.listing2 += "-dry"
		b.newListing1 = b.listing1 + "-new"
		b.newListing2 = b.listing2 + "-new"
		if err := bilib.CopyFileIfExists(origListing1, b.listing1); err != nil {
		origListing1 := listing1
		origListing2 := listing2
		listing1 += "-dry"
		listing2 += "-dry"
		if err := bilib.CopyFileIfExists(origListing1, listing1); err != nil {
			return err
		}
		if err := bilib.CopyFileIfExists(origListing2, b.listing2); err != nil {
		if err := bilib.CopyFileIfExists(origListing2, listing2); err != nil {
			return err
		}
	}
@ -249,65 +186,24 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
		b.retryable = true
		return
	}
	b.octx = octx
	b.fctx = fctx

	// overlapping paths check
	err = b.overlappingPathsCheck(fctx, b.fs1, b.fs2)
	if err != nil {
		b.critical = true
		b.retryable = true
		return err
	}

	// Generate Path1 and Path2 listings and copy any unique Path2 files to Path1
	if opt.Resync {
		return b.resync(octx, fctx)
		return b.resync(octx, fctx, listing1, listing2)
	}

	// Check for existence of prior Path1 and Path2 listings
	if !bilib.FileExists(b.listing1) || !bilib.FileExists(b.listing2) {
		if b.opt.Recover && bilib.FileExists(b.listing1+"-old") && bilib.FileExists(b.listing2+"-old") {
			errTip := fmt.Sprintf(Color(terminal.CyanFg, "Path1: %s\n"), Color(terminal.HiBlueFg, b.listing1))
			errTip += fmt.Sprintf(Color(terminal.CyanFg, "Path2: %s"), Color(terminal.HiBlueFg, b.listing2))
			fs.Logf(nil, Color(terminal.YellowFg, "Listings not found. Reverting to prior backup as --recover is set. \n")+errTip)
			if opt.CheckSync != CheckSyncFalse {
				// Run CheckSync to ensure old listing is valid (garbage in, garbage out!)
				fs.Infof(nil, "Validating backup listings for Path1 %s vs Path2 %s", quotePath(path1), quotePath(path2))
				if err = b.checkSync(b.listing1+"-old", b.listing2+"-old"); err != nil {
					b.critical = true
					b.retryable = true
					return err
				}
				fs.Infof(nil, Color(terminal.GreenFg, "Backup listing is valid."))
			}
			b.revertToOldListings()
		} else {
			// On prior critical error abort, the prior listings are renamed to .lst-err to lock out further runs
			b.critical = true
			b.retryable = true
			errTip := Color(terminal.MagentaFg, "Tip: here are the filenames we were looking for. Do they exist? \n")
			errTip += fmt.Sprintf(Color(terminal.CyanFg, "Path1: %s\n"), Color(terminal.HiBlueFg, b.listing1))
			errTip += fmt.Sprintf(Color(terminal.CyanFg, "Path2: %s\n"), Color(terminal.HiBlueFg, b.listing2))
			errTip += Color(terminal.MagentaFg, "Try running this command to inspect the work dir: \n")
			errTip += fmt.Sprintf(Color(terminal.HiCyanFg, "rclone lsl \"%s\""), b.workDir)

			return errors.New("cannot find prior Path1 or Path2 listings, likely due to critical error on prior run \n" + errTip)
		}
	}

	fs.Infof(nil, "Building Path1 and Path2 listings")
	ls1, ls2, err = b.makeMarchListing(fctx)
	if err != nil || accounting.Stats(fctx).Errored() {
		fs.Errorf(nil, Color(terminal.RedFg, "There were errors while building listings. Aborting as it is too dangerous to continue."))
	if !bilib.FileExists(listing1) || !bilib.FileExists(listing2) {
		// On prior critical error abort, the prior listings are renamed to .lst-err to lock out further runs
		b.critical = true
		b.retryable = true
		return err
		return errors.New("cannot find prior Path1 or Path2 listings, likely due to critical error on prior run")
	}

	// Check for Path1 deltas relative to the prior sync
	fs.Infof(nil, "Path1 checking for diffs")
	ds1, err := b.findDeltas(fctx, b.fs1, b.listing1, ls1, "Path1")
	newListing1 := listing1 + "-new"
	ds1, err := b.findDeltas(fctx, b.fs1, listing1, newListing1, "Path1")
	if err != nil {
		return err
	}
@ -315,7 +211,8 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {

	// Check for Path2 deltas relative to the prior sync
	fs.Infof(nil, "Path2 checking for diffs")
	ds2, err := b.findDeltas(fctx, b.fs2, b.listing2, ls2, "Path2")
	newListing2 := listing2 + "-new"
	ds2, err := b.findDeltas(fctx, b.fs2, listing2, newListing2, "Path2")
	if err != nil {
		return err
	}
@ -359,63 +256,38 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {

	// Determine and apply changes to Path1 and Path2
	noChanges := ds1.empty() && ds2.empty()
	changes1 := false // 2to1
	changes2 := false // 1to2
	results2to1 := []Results{}
	results1to2 := []Results{}

	queues := queues{}

	changes1 := false
	changes2 := false
	if noChanges {
		fs.Infof(nil, "No changes found")
	} else {
		fs.Infof(nil, "Applying changes")
		changes1, changes2, results2to1, results1to2, queues, err = b.applyDeltas(octx, ds1, ds2)
		changes1, changes2, err = b.applyDeltas(octx, ds1, ds2)
		if err != nil {
			if b.InGracefulShutdown && (err == context.Canceled || err == accounting.ErrorMaxTransferLimitReachedGraceful || strings.Contains(err.Error(), "context canceled")) {
				fs.Infof(nil, "Ignoring sync error due to Graceful Shutdown: %v", err)
			} else {
				b.critical = true
				// b.retryable = true // not sure about this one
				return err
			}
			b.critical = true
			// b.retryable = true // not sure about this one
			return err
		}
	}

	// Clean up and check listings integrity
	fs.Infof(nil, "Updating listings")
	var err1, err2 error
	if b.DebugName != "" {
		l1, _ := b.loadListing(b.listing1)
		l2, _ := b.loadListing(b.listing2)
		newl1, _ := b.loadListing(b.newListing1)
		newl2, _ := b.loadListing(b.newListing2)
		b.debug(b.DebugName, fmt.Sprintf("pre-saveOldListings, ls1 has name?: %v, ls2 has name?: %v", l1.has(b.DebugName), l2.has(b.DebugName)))
		b.debug(b.DebugName, fmt.Sprintf("pre-saveOldListings, newls1 has name?: %v, newls2 has name?: %v", newl1.has(b.DebugName), newl2.has(b.DebugName)))
	}
	b.saveOldListings()
	// save new listings
	// NOTE: "changes" in this case does not mean this run vs. last run, it means start of this run vs. end of this run.
	// i.e. whether we can use the March lst-new as this side's lst without modifying it.
	if noChanges {
		b.replaceCurrentListings()
		err1 = bilib.CopyFileIfExists(newListing1, listing1)
		err2 = bilib.CopyFileIfExists(newListing2, listing2)
	} else {
		if changes1 || b.InGracefulShutdown { // 2to1
			err1 = b.modifyListing(fctx, b.fs2, b.fs1, results2to1, queues, false)
		if changes1 {
			_, err1 = b.makeListing(fctx, b.fs1, listing1)
		} else {
			err1 = bilib.CopyFileIfExists(b.newListing1, b.listing1)
			err1 = bilib.CopyFileIfExists(newListing1, listing1)
		}
		if changes2 || b.InGracefulShutdown { // 1to2
			err2 = b.modifyListing(fctx, b.fs1, b.fs2, results1to2, queues, true)
		if changes2 {
			_, err2 = b.makeListing(fctx, b.fs2, listing2)
		} else {
			err2 = bilib.CopyFileIfExists(b.newListing2, b.listing2)
			err2 = bilib.CopyFileIfExists(newListing2, listing2)
		}
	}
	if b.DebugName != "" {
		l1, _ := b.loadListing(b.listing1)
		l2, _ := b.loadListing(b.listing2)
		b.debug(b.DebugName, fmt.Sprintf("post-modifyListing, ls1 has name?: %v, ls2 has name?: %v", l1.has(b.DebugName), l2.has(b.DebugName)))
	}
	err = err1
	if err == nil {
		err = err2
@ -427,13 +299,13 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
	}

	if !opt.NoCleanup {
		_ = os.Remove(b.newListing1)
		_ = os.Remove(b.newListing2)
		_ = os.Remove(newListing1)
		_ = os.Remove(newListing2)
	}

	if opt.CheckSync == CheckSyncTrue && !opt.DryRun {
		fs.Infof(nil, "Validating listings for Path1 %s vs Path2 %s", quotePath(path1), quotePath(path2))
		if err := b.checkSync(b.listing1, b.listing2); err != nil {
		if err := b.checkSync(listing1, listing2); err != nil {
			b.critical = true
			return err
		}
@ -442,9 +314,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
	// Optional rmdirs for empty directories
	if opt.RemoveEmptyDirs {
		fs.Infof(nil, "Removing empty directories")
		fctx = b.setBackupDir(fctx, 1)
		err1 := operations.Rmdirs(fctx, b.fs1, "", true)
		fctx = b.setBackupDir(fctx, 2)
		err2 := operations.Rmdirs(fctx, b.fs2, "", true)
		err := err1
		if err == nil {
@ -460,6 +330,135 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
	return nil
}

// resync implements the --resync mode.
// It will generate path1 and path2 listings
// and copy any unique path2 files to path1.
func (b *bisyncRun) resync(octx, fctx context.Context, listing1, listing2 string) error {
	fs.Infof(nil, "Copying unique Path2 files to Path1")

	newListing1 := listing1 + "-new"
	filesNow1, err := b.makeListing(fctx, b.fs1, newListing1)
	if err == nil {
		err = b.checkListing(filesNow1, newListing1, "current Path1")
	}
	if err != nil {
		return err
	}

	newListing2 := listing2 + "-new"
	filesNow2, err := b.makeListing(fctx, b.fs2, newListing2)
	if err == nil {
		err = b.checkListing(filesNow2, newListing2, "current Path2")
	}
	if err != nil {
		return err
	}

	// Check access health on the Path1 and Path2 filesystems
	// enforce even though this is --resync
	if b.opt.CheckAccess {
		fs.Infof(nil, "Checking access health")

		ds1 := &deltaSet{
			checkFiles: bilib.Names{},
		}

		ds2 := &deltaSet{
			checkFiles: bilib.Names{},
		}

		for _, file := range filesNow1.list {
			if filepath.Base(file) == b.opt.CheckFilename {
				ds1.checkFiles.Add(file)
			}
		}

		for _, file := range filesNow2.list {
			if filepath.Base(file) == b.opt.CheckFilename {
				ds2.checkFiles.Add(file)
			}
		}

		err = b.checkAccess(ds1.checkFiles, ds2.checkFiles)
		if err != nil {
			b.critical = true
			b.retryable = true
			return err
		}
	}

	copy2to1 := []string{}
	for _, file := range filesNow2.list {
		if !filesNow1.has(file) {
			b.indent("Path2", file, "Resync will copy to Path1")
			copy2to1 = append(copy2to1, file)
		}
	}

	if len(copy2to1) > 0 {
		b.indent("Path2", "Path1", "Resync is doing queued copies to")
		// octx does not have extra filters!
		err = b.fastCopy(octx, b.fs2, b.fs1, bilib.ToNames(copy2to1), "resync-copy2to1")
		if err != nil {
			b.critical = true
			return err
		}
	}

	fs.Infof(nil, "Resynching Path1 to Path2")
	ctxRun := b.opt.setDryRun(fctx)
	// fctx has our extra filters added!
	ctxSync, filterSync := filter.AddConfig(ctxRun)
	if filterSync.Opt.MinSize == -1 {
		// prevent overwriting Google Doc files (their size is -1)
		filterSync.Opt.MinSize = 0
	}
	if err = sync.CopyDir(ctxSync, b.fs2, b.fs1, b.opt.CreateEmptySrcDirs); err != nil {
		b.critical = true
		return err
	}

	if b.opt.CreateEmptySrcDirs {
		// copy Path2 back to Path1, for empty dirs
		// the fastCopy above cannot include directories, because it relies on --files-from for filtering,
		// so instead we'll copy them here, relying on fctx for our filtering.

		// This preserves the original resync order for backward compatibility. It is essentially:
		// rclone copy Path2 Path1 --ignore-existing
		// rclone copy Path1 Path2 --create-empty-src-dirs
		// rclone copy Path2 Path1 --create-empty-src-dirs

		// although if we were starting from scratch, it might be cleaner and faster to just do:
		// rclone copy Path2 Path1 --create-empty-src-dirs
		// rclone copy Path1 Path2 --create-empty-src-dirs

		fs.Infof(nil, "Resynching Path2 to Path1 (for empty dirs)")

		// note copy (not sync) and dst comes before src
		if err = sync.CopyDir(ctxSync, b.fs1, b.fs2, b.opt.CreateEmptySrcDirs); err != nil {
			b.critical = true
			return err
		}
	}

	fs.Infof(nil, "Resync updating listings")
	if _, err = b.makeListing(fctx, b.fs1, listing1); err != nil {
		b.critical = true
		return err
	}

	if _, err = b.makeListing(fctx, b.fs2, listing2); err != nil {
		b.critical = true
		return err
	}

	if !b.opt.NoCleanup {
		_ = os.Remove(newListing1)
		_ = os.Remove(newListing2)
	}
	return nil
}

// checkSync validates listings
func (b *bisyncRun) checkSync(listing1, listing2 string) error {
	files1, err := b.loadListing(listing1)
@ -473,22 +472,17 @@ func (b *bisyncRun) checkSync(listing1, listing2 string) error {

	ok := true
	for _, file := range files1.list {
		if !files2.has(file) && !files2.has(b.aliases.Alias(file)) {
		if !files2.has(file) {
			b.indent("ERROR", file, "Path1 file not found in Path2")
			ok = false
		} else {
			if !b.fileInfoEqual(file, files2.getTryAlias(file, b.aliases.Alias(file)), files1, files2) {
				ok = false
			}
		}
	}
	for _, file := range files2.list {
		if !files1.has(file) && !files1.has(b.aliases.Alias(file)) {
		if !files1.has(file) {
			b.indent("ERROR", file, "Path2 file not found in Path1")
			ok = false
		}
	}

	if !ok {
		return errors.New("path1 and path2 are out of sync, run --resync to recover")
	}
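checkSync is a two-way membership test between the listings: every Path1 file must exist in Path2 (directly, or via a filename alias on the newer side) and vice versa. The core idea, with plain maps standing in for the fileList type:

package main

import "fmt"

// inSync reports whether two listings contain the same set of names,
// logging each missing file the way checkSync's b.indent calls do.
func inSync(files1, files2 map[string]bool) bool {
	ok := true
	for f := range files1 {
		if !files2[f] {
			fmt.Println("Path1 file not found in Path2:", f)
			ok = false
		}
	}
	for f := range files2 {
		if !files1[f] {
			fmt.Println("Path2 file not found in Path1:", f)
			ok = false
		}
	}
	return ok
}

func main() {
	p1 := map[string]bool{"a.txt": true}
	p2 := map[string]bool{"a.txt": true, "b.txt": true}
	fmt.Println(inSync(p1, p2)) // false: b.txt is missing on Path1
}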
@ -504,9 +498,6 @@ func (b *bisyncRun) checkAccess(checkFiles1, checkFiles2 bilib.Names) error {
	numChecks1 := len(checkFiles1)
	numChecks2 := len(checkFiles2)
	if numChecks1 == 0 || numChecks1 != numChecks2 {
		if numChecks1 == 0 && numChecks2 == 0 {
			fs.Logf("--check-access", Color(terminal.RedFg, "Failed to find any files named %s\n More info: %s"), Color(terminal.CyanFg, opt.CheckFilename), Color(terminal.BlueFg, "https://rclone.org/bisync/#check-access"))
		}
		fs.Errorf(nil, "%s Path1 count %d, Path2 count %d - %s", prefix, numChecks1, numChecks2, opt.CheckFilename)
		ok = false
	}
@ -531,142 +522,3 @@ func (b *bisyncRun) checkAccess(checkFiles1, checkFiles2 bilib.Names) error {
	fs.Infof(nil, "Found %d matching %q files on both paths", numChecks1, opt.CheckFilename)
	return nil
}

func (b *bisyncRun) testFn() {
	if b.opt.TestFn != nil {
		b.opt.TestFn()
	}
}

func (b *bisyncRun) handleErr(o interface{}, msg string, err error, critical, retryable bool) {
	if err != nil {
		if retryable {
			b.retryable = true
		}
		if critical {
			b.critical = true
			b.abort = true
			fs.Errorf(o, "%s: %v", msg, err)
		} else {
			fs.Infof(o, "%s: %v", msg, err)
		}
	}
}

// setBackupDir overrides --backup-dir with path-specific version, if set, in each direction
func (b *bisyncRun) setBackupDir(ctx context.Context, destPath int) context.Context {
	ci := fs.GetConfig(ctx)
	ci.BackupDir = b.opt.OrigBackupDir
	if destPath == 1 && b.opt.BackupDir1 != "" {
		ci.BackupDir = b.opt.BackupDir1
	}
	if destPath == 2 && b.opt.BackupDir2 != "" {
		ci.BackupDir = b.opt.BackupDir2
	}
	fs.Debugf(ci.BackupDir, "updated backup-dir for Path%d", destPath)
	return ctx
}

func (b *bisyncRun) overlappingPathsCheck(fctx context.Context, fs1, fs2 fs.Fs) error {
	if operations.OverlappingFilterCheck(fctx, fs2, fs1) {
		err = fmt.Errorf(Color(terminal.RedFg, "Overlapping paths detected. Cannot bisync between paths that overlap, unless excluded by filters."))
		return err
	}
	// need to test our BackupDirs too, as sync will be fooled by our --files-from filters
	testBackupDir := func(ctx context.Context, destPath int) error {
		src := fs1
		dst := fs2
		if destPath == 1 {
			src = fs2
			dst = fs1
		}
		ctxBackupDir := b.setBackupDir(ctx, destPath)
		ci := fs.GetConfig(ctxBackupDir)
		if ci.BackupDir != "" {
			// operations.BackupDir should return an error if not properly excluded
			_, err = operations.BackupDir(fctx, dst, src, "")
			return err
		}
		return nil
	}
	err = testBackupDir(fctx, 1)
	if err != nil {
		return err
	}
	err = testBackupDir(fctx, 2)
	if err != nil {
		return err
	}
	return nil
}

func (b *bisyncRun) checkSyntax() error {
	// check for odd number of quotes in path, usually indicating an escaping issue
	path1 := bilib.FsPath(b.fs1)
	path2 := bilib.FsPath(b.fs2)
	if strings.Count(path1, `"`)%2 != 0 || strings.Count(path2, `"`)%2 != 0 {
		return fmt.Errorf(Color(terminal.RedFg, `detected an odd number of quotes in your path(s). This is usually a mistake indicating incorrect escaping.
Please check your command and try again. Note that on Windows, quoted paths must not have a trailing slash, or it will be interpreted as escaping the quote. path1: %v path2: %v`), path1, path2)
	}
	// check for other syntax issues
	_, err = os.Stat(b.basePath)
	if err != nil {
		if strings.Contains(err.Error(), "syntax is incorrect") {
			return fmt.Errorf(Color(terminal.RedFg, `syntax error detected in your path(s). Please check your command and try again.
Note that on Windows, quoted paths must not have a trailing slash, or it will be interpreted as escaping the quote. path1: %v path2: %v error: %v`), path1, path2, err)
		}
	}
	if runtime.GOOS == "windows" && (strings.Contains(path1, " --") || strings.Contains(path2, " --")) {
		return fmt.Errorf(Color(terminal.RedFg, `detected possible flags in your path(s). This is usually a mistake indicating incorrect escaping or quoting (possibly closing quote is missing?).
Please check your command and try again. Note that on Windows, quoted paths must not have a trailing slash, or it will be interpreted as escaping the quote. path1: %v path2: %v`), path1, path2)
	}
	return nil
}

func (b *bisyncRun) debug(nametocheck, msgiftrue string) {
	if b.DebugName != "" && b.DebugName == nametocheck {
		fs.Infof(Color(terminal.MagentaBg, "DEBUGNAME "+b.DebugName), Color(terminal.MagentaBg, msgiftrue))
	}
}

func (b *bisyncRun) debugFn(nametocheck string, fn func()) {
	if b.DebugName != "" && b.DebugName == nametocheck {
		fn()
	}
}

// waitFor runs fn() until it returns true or the timeout expires
func waitFor(msg string, totalWait time.Duration, fn func() bool) (ok bool) {
	const individualWait = 1 * time.Second
	for i := 0; i < int(totalWait/individualWait); i++ {
		ok = fn()
		if ok {
			return ok
		}
		fs.Infof(nil, Color(terminal.YellowFg, "%s: %vs"), msg, int(totalWait/individualWait)-i)
		time.Sleep(individualWait)
	}
	return false
}

// mainly to make sure tests don't interfere with each other when running more than one
func resetGlobals() {
	downloadHash = false
	logger = operations.NewLoggerOpt()
	ignoreListingChecksum = false
	ignoreListingModtime = false
	hashTypes = nil
	queueCI = nil
	hashType = 0
	fsrc, fdst = nil, nil
	fcrypt = nil
	Opt = Options{}
	once = gosync.Once{}
	downloadHashWarn = gosync.Once{}
	firstDownloadHash = gosync.Once{}
	ls1 = newFileList()
	ls2 = newFileList()
	err = nil
	firstErr = nil
	marchCtx = nil
}
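waitFor above is a simple countdown poll: check the condition once per second, log the remaining seconds, and give up when the budget is spent. A usage sketch (assumes waitFor from the hunk above is in scope with its fs/terminal dependencies; the atomic flag is illustrative):

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

func main() {
	var done atomic.Bool
	go func() {
		time.Sleep(2 * time.Second)
		done.Store(true)
	}()
	// Poll for up to 10 seconds, the same way Bisync waits on
	// b.CleanupCompleted during graceful shutdown.
	ok := waitFor("Waiting for cleanup", 10*time.Second, func() bool { return done.Load() })
	fmt.Println("cleanup finished:", ok)
}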
@ -2,310 +2,67 @@ package bisync

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"sort"
	mutex "sync" // renamed as "sync" already in use
	"time"

	"github.com/rclone/rclone/cmd/bisync/bilib"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/filter"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fs/sync"
	"github.com/rclone/rclone/lib/terminal"
)

// Results represents a pair of synced files, as reported by the LoggerFn
// Bisync uses this to determine what happened during the sync, and modify the listings accordingly
type Results struct {
	Src      string
	Dst      string
	Name     string
	AltName  string
	Size     int64
	Modtime  time.Time
	Hash     string
	Flags    string
	Sigil    operations.Sigil
	Err      error
	Winner   operations.Winner
	IsWinner bool
	IsSrc    bool
	IsDst    bool
	Origin   string
}

// ResultsSlice is a slice of Results (obviously)
type ResultsSlice []Results

func (rs *ResultsSlice) has(name string) bool {
	for _, r := range *rs {
		if r.Name == name {
			return true
		}
	}
	return false
}

var (
	logger                = operations.NewLoggerOpt()
	lock                  mutex.Mutex
	once                  mutex.Once
	ignoreListingChecksum bool
	ignoreListingModtime  bool
	hashTypes             map[string]hash.Type
	queueCI               *fs.ConfigInfo
)

// allows us to get the right hashtype during the LoggerFn without knowing whether it's Path1/Path2
func getHashType(fname string) hash.Type {
	ht, ok := hashTypes[fname]
	if ok {
		return ht
	}
	return hash.None
}

// FsPathIfAny handles type assertions and returns a formatted bilib.FsPath if valid, otherwise ""
func FsPathIfAny(x fs.DirEntry) string {
	obj, ok := x.(fs.Object)
	if x != nil && ok {
		return bilib.FsPath(obj.Fs())
	}
	return ""
}

func resultName(result Results, side, src, dst fs.DirEntry) string {
	if side != nil {
		return side.Remote()
	} else if result.IsSrc && dst != nil {
		return dst.Remote()
	} else if src != nil {
		return src.Remote()
	}
	return ""
}

// returns the opposite side's name, only if different
func altName(name string, src, dst fs.DirEntry) string {
	if src != nil && dst != nil {
		if src.Remote() != dst.Remote() {
			switch name {
			case src.Remote():
				return dst.Remote()
			case dst.Remote():
				return src.Remote()
			}
		}
	}
	return ""
}

// WriteResults is Bisync's LoggerFn
func WriteResults(ctx context.Context, sigil operations.Sigil, src, dst fs.DirEntry, err error) {
	lock.Lock()
	defer lock.Unlock()

	opt := operations.GetLoggerOpt(ctx)
	result := Results{
		Sigil:  sigil,
		Src:    FsPathIfAny(src),
		Dst:    FsPathIfAny(dst),
		Err:    err,
		Origin: "sync",
	}

	result.Winner = operations.WinningSide(ctx, sigil, src, dst, err)

	fss := []fs.DirEntry{src, dst}
	for i, side := range fss {

		result.Name = resultName(result, side, src, dst)
		result.AltName = altName(result.Name, src, dst)
		result.IsSrc = i == 0
		result.IsDst = i == 1
		result.Flags = "-"
		if side != nil {
			result.Size = side.Size()
			if !ignoreListingModtime {
				result.Modtime = side.ModTime(ctx).In(TZ)
			}
			if !ignoreListingChecksum {
				sideObj, ok := side.(fs.ObjectInfo)
				if ok {
					result.Hash, _ = sideObj.Hash(ctx, getHashType(sideObj.Fs().Name()))
					result.Hash, _ = tryDownloadHash(ctx, sideObj, result.Hash)
				}
			}
		}
		result.IsWinner = result.Winner.Obj == side

		// used during resync only
		if err == fs.ErrorIsDir {
			if src != nil {
				result.Src = src.Remote()
				result.Name = src.Remote()
			} else {
				result.Dst = dst.Remote()
				result.Name = dst.Remote()
			}
			result.Flags = "d"
			result.Size = -1
		}

		prettyprint(result, "writing result", fs.LogLevelDebug)
		if result.Size < 0 && result.Flags != "d" && ((queueCI.CheckSum && !downloadHash) || queueCI.SizeOnly) {
			once.Do(func() {
				fs.Logf(result.Name, Color(terminal.YellowFg, "Files of unknown size (such as Google Docs) do not sync reliably with --checksum or --size-only. Consider using modtime instead (the default) or --drive-skip-gdocs"))
			})
		}

		err := json.NewEncoder(opt.JSON).Encode(result)
		if err != nil {
			fs.Errorf(result, "Error encoding JSON: %v", err)
		}
	}
}

// ReadResults decodes the JSON data from WriteResults
func ReadResults(results io.Reader) []Results {
	dec := json.NewDecoder(results)
	var slice []Results
	for {
		var r Results
		if err := dec.Decode(&r); err == io.EOF {
			break
		}
		prettyprint(r, "result", fs.LogLevelDebug)
		slice = append(slice, r)
	}
	return slice
}

// for setup code shared by both fastCopy and resyncDir
func (b *bisyncRun) preCopy(ctx context.Context) context.Context {
	queueCI = fs.GetConfig(ctx)
	ignoreListingChecksum = b.opt.IgnoreListingChecksum
	ignoreListingModtime = !b.opt.Compare.Modtime
	hashTypes = map[string]hash.Type{
		b.fs1.Name(): b.opt.Compare.HashType1,
		b.fs2.Name(): b.opt.Compare.HashType2,
	}
	logger.LoggerFn = WriteResults
	overridingEqual := false
	if (b.opt.Compare.Modtime && b.opt.Compare.Checksum) || b.opt.Compare.DownloadHash {
		overridingEqual = true
		fs.Debugf(nil, "overriding equal")
		// otherwise impossible in Sync, so override Equal
		ctx = b.EqualFn(ctx)
	}
	if b.opt.ResyncMode == PreferOlder || b.opt.ResyncMode == PreferLarger || b.opt.ResyncMode == PreferSmaller {
		overridingEqual = true
		fs.Debugf(nil, "overriding equal")
		ctx = b.EqualFn(ctx)
	}
	ctxCopyLogger := operations.WithSyncLogger(ctx, logger)
	if b.opt.Compare.Checksum && (b.opt.Compare.NoSlowHash || b.opt.Compare.SlowHashSyncOnly) && b.opt.Compare.SlowHashDetected {
		// set here in case !b.opt.Compare.Modtime
		queueCI = fs.GetConfig(ctxCopyLogger)
		if b.opt.Compare.NoSlowHash {
			queueCI.CheckSum = false
		}
		if b.opt.Compare.SlowHashSyncOnly && !overridingEqual {
			queueCI.CheckSum = true
		}
	}
	return ctxCopyLogger
}

func (b *bisyncRun) fastCopy(ctx context.Context, fsrc, fdst fs.Fs, files bilib.Names, queueName string) ([]Results, error) {
	if b.InGracefulShutdown {
		return nil, nil
	}
	ctx = b.preCopy(ctx)
func (b *bisyncRun) fastCopy(ctx context.Context, fsrc, fdst fs.Fs, files bilib.Names, queueName string) error {
	if err := b.saveQueue(files, queueName); err != nil {
		return nil, err
		return err
	}

	ctxCopy, filterCopy := filter.AddConfig(b.opt.setDryRun(ctx))
	for _, file := range files.ToList() {
		if err := filterCopy.AddFile(file); err != nil {
			return nil, err
		}
		alias := b.aliases.Alias(file)
		if alias != file {
			if err := filterCopy.AddFile(alias); err != nil {
				return nil, err
			}
			return err
		}
	}

	b.SyncCI = fs.GetConfig(ctxCopy)      // allows us to request graceful shutdown
	accounting.MaxCompletedTransfers = -1 // we need a complete list in the event of graceful shutdown
	ctxCopy, b.CancelSync = context.WithCancel(ctxCopy)
	b.testFn()
	err := sync.Sync(ctxCopy, fdst, fsrc, b.opt.CreateEmptySrcDirs)
	prettyprint(logger, "logger", fs.LogLevelDebug)

	getResults := ReadResults(logger.JSON)
	fs.Debugf(nil, "Got %v results for %v", len(getResults), queueName)

	lineFormat := "%s %8d %s %s %s %q\n"
	for _, result := range getResults {
		fs.Debugf(nil, lineFormat, result.Flags, result.Size, result.Hash, "", result.Modtime, result.Name)
	}

	return getResults, err
	return sync.CopyDir(ctxCopy, fdst, fsrc, b.opt.CreateEmptySrcDirs)
}

func (b *bisyncRun) retryFastCopy(ctx context.Context, fsrc, fdst fs.Fs, files bilib.Names, queueName string, results []Results, err error) ([]Results, error) {
	ci := fs.GetConfig(ctx)
	if err != nil && b.opt.Resilient && !b.InGracefulShutdown && ci.Retries > 1 {
		for tries := 1; tries <= ci.Retries; tries++ {
			fs.Logf(queueName, Color(terminal.YellowFg, "Received error: %v - retrying as --resilient is set. Retry %d/%d"), err, tries, ci.Retries)
			accounting.GlobalStats().ResetErrors()
			if retryAfter := accounting.GlobalStats().RetryAfter(); !retryAfter.IsZero() {
				d := time.Until(retryAfter)
				if d > 0 {
					fs.Logf(nil, "Received retry after error - sleeping until %s (%v)", retryAfter.Format(time.RFC3339Nano), d)
					time.Sleep(d)
				}
			}
			if ci.RetriesInterval > 0 {
				naptime(ci.RetriesInterval)
			}
			results, err = b.fastCopy(ctx, fsrc, fdst, files, queueName)
			if err == nil || b.InGracefulShutdown {
				return results, err
			}
func (b *bisyncRun) fastDelete(ctx context.Context, f fs.Fs, files bilib.Names, queueName string) error {
	if err := b.saveQueue(files, queueName); err != nil {
		return err
	}

	transfers := fs.GetConfig(ctx).Transfers

	ctxRun, filterDelete := filter.AddConfig(b.opt.setDryRun(ctx))

	for _, file := range files.ToList() {
		if err := filterDelete.AddFile(file); err != nil {
			return err
		}
	}
	return results, err
}

func (b *bisyncRun) resyncDir(ctx context.Context, fsrc, fdst fs.Fs) ([]Results, error) {
	ctx = b.preCopy(ctx)

	err := sync.CopyDir(ctx, fdst, fsrc, b.opt.CreateEmptySrcDirs)
	prettyprint(logger, "logger", fs.LogLevelDebug)

	getResults := ReadResults(logger.JSON)
	fs.Debugf(nil, "Got %v results for %v", len(getResults), "resync")

	return getResults, err
	objChan := make(fs.ObjectsChan, transfers)
	errChan := make(chan error, 1)
	go func() {
		errChan <- operations.DeleteFiles(ctxRun, objChan)
	}()
	err := operations.ListFn(ctxRun, f, func(obj fs.Object) {
		remote := obj.Remote()
		if files.Has(remote) {
			objChan <- obj
		}
	})
	close(objChan)
	opErr := <-errChan
	if err == nil {
		err = opErr
	}
	return err
}

// operation should be "make" or "remove"
func (b *bisyncRun) syncEmptyDirs(ctx context.Context, dst fs.Fs, candidates bilib.Names, dirsList *fileList, results *[]Results, operation string) {
	if b.InGracefulShutdown {
		return
	}
	fs.Debugf(nil, "syncing empty dirs")
func (b *bisyncRun) syncEmptyDirs(ctx context.Context, dst fs.Fs, candidates bilib.Names, dirsList *fileList, operation string) {
	if b.opt.CreateEmptySrcDirs && (!b.opt.Resync || operation == "make") {

		candidatesList := candidates.ToList()
@ -316,52 +73,18 @@ func (b *bisyncRun) syncEmptyDirs(ctx context.Context, dst fs.Fs, candidates bil
|
|||
|
||||
for _, s := range candidatesList {
|
||||
var direrr error
|
||||
if dirsList.has(s) { // make sure it's a dir, not a file
|
||||
r := Results{}
|
||||
r.Name = s
|
||||
r.Size = -1
|
||||
r.Modtime = dirsList.getTime(s).In(time.UTC)
|
||||
r.Flags = "d"
|
||||
r.Err = nil
|
||||
r.Origin = "syncEmptyDirs"
|
||||
r.Winner = operations.Winner{ // note: Obj not set
|
||||
Side: "src",
|
||||
Err: nil,
|
||||
}
|
||||
|
||||
rSrc := r
|
||||
rDst := r
|
||||
rSrc.IsSrc = true
|
||||
rSrc.IsDst = false
|
||||
rDst.IsSrc = false
|
||||
rDst.IsDst = true
|
||||
rSrc.IsWinner = true
|
||||
rDst.IsWinner = false
|
||||
|
||||
if dirsList.has(s) { //make sure it's a dir, not a file
|
||||
if operation == "remove" {
|
||||
// directories made empty by the sync will have already been deleted during the sync
|
||||
// this just catches the already-empty ones (excluded from sync by --files-from filter)
|
||||
direrr = operations.TryRmdir(ctx, dst, s)
|
||||
rSrc.Sigil = operations.MissingOnSrc
|
||||
rDst.Sigil = operations.MissingOnSrc
|
||||
rSrc.Dst = s
|
||||
rDst.Dst = s
|
||||
rSrc.Winner.Side = "none"
|
||||
rDst.Winner.Side = "none"
|
||||
//note: we need to use Rmdirs instead of Rmdir because directories will fail to delete if they have other empty dirs inside of them.
|
||||
direrr = operations.Rmdirs(ctx, dst, s, false)
|
||||
} else if operation == "make" {
|
||||
direrr = operations.Mkdir(ctx, dst, s)
|
||||
rSrc.Sigil = operations.MissingOnDst
|
||||
rDst.Sigil = operations.MissingOnDst
|
||||
rSrc.Src = s
|
||||
rDst.Src = s
|
||||
} else {
|
||||
direrr = fmt.Errorf("invalid operation. Expected 'make' or 'remove', received '%q'", operation)
|
||||
}
|
||||
|
||||
if direrr != nil {
|
||||
fs.Debugf(nil, "Error syncing directory: %v", direrr)
|
||||
} else {
|
||||
*results = append(*results, rSrc, rDst)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -375,16 +98,3 @@ func (b *bisyncRun) saveQueue(files bilib.Names, jobName string) error {
|
|||
queueFile := fmt.Sprintf("%s.%s.que", b.basePath, jobName)
|
||||
return files.Save(queueFile)
|
||||
}
|
||||
|
||||
func naptime(totalWait time.Duration) {
|
||||
expireTime := time.Now().Add(totalWait)
|
||||
fs.Logf(nil, "will retry in %v at %v", totalWait, expireTime.Format("2006-01-02 15:04:05 MST"))
|
||||
for i := 0; time.Until(expireTime) > 0; i++ {
|
||||
if i > 0 && i%10 == 0 {
|
||||
fs.Infof(nil, Color(terminal.Dim, "retrying in %v..."), time.Until(expireTime).Round(1*time.Second))
|
||||
} else {
|
||||
fs.Debugf(nil, Color(terminal.Dim, "retrying in %v..."), time.Until(expireTime).Round(1*time.Second))
|
||||
}
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
}
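
naptime trades a single long sleep for one-second ticks so it can keep logging the remaining wait. A standalone sketch of the same countdown, assuming the standard library log package in place of rclone's fs logging:

package main

import (
	"log"
	"time"
)

// naptime sleeps in one-second ticks so progress can be logged while waiting.
func naptime(totalWait time.Duration) {
	expireTime := time.Now().Add(totalWait)
	log.Printf("will retry in %v at %v", totalWait, expireTime.Format("2006-01-02 15:04:05 MST"))
	for i := 0; time.Until(expireTime) > 0; i++ {
		if i > 0 && i%10 == 0 { // surface a reminder every 10 ticks
			log.Printf("retrying in %v...", time.Until(expireTime).Round(time.Second))
		}
		time.Sleep(1 * time.Second)
	}
}

func main() { naptime(3 * time.Second) }
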
@@ -74,12 +74,6 @@ func rcBisync(ctx context.Context, in rc.Params) (out rc.Params, err error) {
	if opt.Workdir, err = in.GetString("workdir"); rc.NotErrParamNotFound(err) {
		return
	}
	if opt.BackupDir1, err = in.GetString("backupdir1"); rc.NotErrParamNotFound(err) {
		return
	}
	if opt.BackupDir2, err = in.GetString("backupdir2"); rc.NotErrParamNotFound(err) {
		return
	}

	checkSync, err := in.GetString("checkSync")
	if rc.NotErrParamNotFound(err) {
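
The hunk above reads optional rc parameters: only a real error aborts, while a missing key leaves the default untouched. A standalone sketch of that pattern, with a plain map and hypothetical helpers standing in for rc.Params and rc.NotErrParamNotFound:

package main

import (
	"errors"
	"fmt"
)

var errParamNotFound = errors.New("param not found")

// getString fetches a string parameter, or errParamNotFound if the key is absent.
func getString(in map[string]interface{}, key string) (string, error) {
	v, ok := in[key]
	if !ok {
		return "", errParamNotFound
	}
	s, ok := v.(string)
	if !ok {
		return "", fmt.Errorf("expected string for %q", key)
	}
	return s, nil
}

// notErrParamNotFound reports whether err is a real error worth aborting on.
func notErrParamNotFound(err error) bool {
	return err != nil && !errors.Is(err, errParamNotFound)
}

func main() {
	in := map[string]interface{}{"workdir": "/tmp/bisync"}
	workdir, err := getString(in, "workdir")
	if notErrParamNotFound(err) {
		panic(err)
	}
	fmt.Println(workdir) // a missing "workdir" key would simply leave the default
}
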
@@ -1,450 +0,0 @@
package bisync

import (
	"context"
	"fmt"
	"math"
	"mime"
	"path"
	"strings"
	"time"

	"github.com/rclone/rclone/cmd/bisync/bilib"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/lib/terminal"
)

// Prefer describes strategies for resolving sync conflicts
type Prefer = fs.Enum[preferChoices]

// Supported --conflict-resolve strategies
const (
	PreferNone Prefer = iota
	PreferPath1
	PreferPath2
	PreferNewer
	PreferOlder
	PreferLarger
	PreferSmaller
)

type preferChoices struct{}

func (preferChoices) Choices() []string {
	return []string{
		PreferNone:    "none",
		PreferNewer:   "newer",
		PreferOlder:   "older",
		PreferLarger:  "larger",
		PreferSmaller: "smaller",
		PreferPath1:   "path1",
		PreferPath2:   "path2",
	}
}

func (preferChoices) Type() string {
	return "string"
}
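
Choices() relies on Go's index-keyed composite literals: each Prefer constant doubles as the index of its name, so the entries can be listed in any order. A cut-down standalone sketch without the fs.Enum machinery:

package main

import "fmt"

type Prefer int

const (
	PreferNone Prefer = iota
	PreferPath1
	PreferPath2
	PreferNewer
)

// preferNames is index-keyed: preferNames[PreferNewer] is "newer".
var preferNames = []string{
	PreferNone:  "none",
	PreferNewer: "newer",
	PreferPath1: "path1",
	PreferPath2: "path2",
}

func main() {
	fmt.Println(preferNames[PreferNewer]) // prints "newer"
}
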

// ConflictResolveList is a list of --conflict-resolve flag choices used in the help
var ConflictResolveList = Opt.ConflictResolve.Help()

// ConflictLoserAction describes possible actions to take on the loser of a sync conflict
type ConflictLoserAction = fs.Enum[conflictLoserChoices]

// Supported --conflict-loser actions
const (
	ConflictLoserSkip     ConflictLoserAction = iota // Reserved as zero but currently unused
	ConflictLoserNumber                              // file.conflict1, file.conflict2, file.conflict3, etc.
	ConflictLoserPathname                            // file.path1, file.path2
	ConflictLoserDelete                              // delete the loser, keep winner only
)

type conflictLoserChoices struct{}

func (conflictLoserChoices) Choices() []string {
	return []string{
		ConflictLoserNumber:   "num",
		ConflictLoserPathname: "pathname",
		ConflictLoserDelete:   "delete",
	}
}

func (conflictLoserChoices) Type() string {
	return "ConflictLoserAction"
}

// ConflictLoserList is a list of --conflict-loser flag choices used in the help
var ConflictLoserList = Opt.ConflictLoser.Help()

func (b *bisyncRun) setResolveDefaults(ctx context.Context) error {
	if b.opt.ConflictLoser == ConflictLoserSkip {
		b.opt.ConflictLoser = ConflictLoserNumber
	}
	if b.opt.ConflictSuffixFlag == "" {
		b.opt.ConflictSuffixFlag = "conflict"
	}
	suffixes := strings.Split(b.opt.ConflictSuffixFlag, ",")
	if len(suffixes) == 1 {
		b.opt.ConflictSuffix1 = suffixes[0]
		b.opt.ConflictSuffix2 = suffixes[0]
	} else if len(suffixes) == 2 {
		b.opt.ConflictSuffix1 = suffixes[0]
		b.opt.ConflictSuffix2 = suffixes[1]
	} else {
		return fmt.Errorf("--conflict-suffix cannot have more than 2 comma-separated values. Received %v: %v", len(suffixes), suffixes)
	}
	// replace glob variables, if any
	t := time.Now() // capture static time here so it is the same for all files throughout this run
	b.opt.ConflictSuffix1 = bilib.AppyTimeGlobs(b.opt.ConflictSuffix1, t)
	b.opt.ConflictSuffix2 = bilib.AppyTimeGlobs(b.opt.ConflictSuffix2, t)

	// append dot (intentionally allow more than one)
	b.opt.ConflictSuffix1 = "." + b.opt.ConflictSuffix1
	b.opt.ConflictSuffix2 = "." + b.opt.ConflictSuffix2

	// checks and warnings
	if (b.opt.ConflictResolve == PreferNewer || b.opt.ConflictResolve == PreferOlder) && (b.fs1.Precision() == fs.ModTimeNotSupported || b.fs2.Precision() == fs.ModTimeNotSupported) {
		fs.Logf(nil, Color(terminal.YellowFg, "WARNING: ignoring --conflict-resolve %s as at least one remote does not support modtimes."), b.opt.ConflictResolve.String())
		b.opt.ConflictResolve = PreferNone
	} else if (b.opt.ConflictResolve == PreferNewer || b.opt.ConflictResolve == PreferOlder) && !b.opt.Compare.Modtime {
		fs.Logf(nil, Color(terminal.YellowFg, "WARNING: ignoring --conflict-resolve %s as --compare does not include modtime."), b.opt.ConflictResolve.String())
		b.opt.ConflictResolve = PreferNone
	}
	if (b.opt.ConflictResolve == PreferLarger || b.opt.ConflictResolve == PreferSmaller) && !b.opt.Compare.Size {
		fs.Logf(nil, Color(terminal.YellowFg, "WARNING: ignoring --conflict-resolve %s as --compare does not include size."), b.opt.ConflictResolve.String())
		b.opt.ConflictResolve = PreferNone
	}

	return nil
}

type renames map[string]renamesInfo // [originalName]newName (remember the originalName may have an alias)
// the newName may be the same as the old name (if winner), but should not be blank, unless we're deleting.
// the oldNames may not match each other, if we're normalizing case or unicode
// all names should be "remotes" (relative names, without base path)
type renamesInfo struct {
	path1 namePair
	path2 namePair
}
type namePair struct {
	oldName string
	newName string
}
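
For orientation, an illustrative (hypothetical) entry using the types above: after both sides changed file5.txt with no winner, each side's rename is recorded under path1's old name.

// Illustrative in-context snippet only; these names are examples, not part of the diff.
var exampleRenames = renames{
	"file5.txt": renamesInfo{
		path1: namePair{oldName: "file5.txt", newName: "file5.txt.conflict1"},
		path2: namePair{oldName: "file5.txt", newName: "file5.txt.conflict2"},
	},
}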

func (b *bisyncRun) resolve(ctxMove context.Context, path1, path2, file, alias string, renameSkipped, copy1to2, copy2to1 *bilib.Names, ds1, ds2 *deltaSet) error {
	winningPath := 0
	if b.opt.ConflictResolve != PreferNone {
		winningPath = b.conflictWinner(ds1, ds2, file, alias)
		if winningPath > 0 {
			fs.Infof(file, Color(terminal.GreenFg, "The winner is: Path%d"), winningPath)
		} else {
			fs.Infof(file, Color(terminal.RedFg, "A winner could not be determined."))
		}
	}

	suff1 := b.opt.ConflictSuffix1 // copy to new var to make sure our changes here don't persist
	suff2 := b.opt.ConflictSuffix2
	if b.opt.ConflictLoser == ConflictLoserPathname && b.opt.ConflictSuffix1 == b.opt.ConflictSuffix2 {
		// numerate, but not if user supplied two different suffixes
		suff1 += "1"
		suff2 += "2"
	}

	r := renamesInfo{
		path1: namePair{
			oldName: file,
			newName: SuffixName(ctxMove, file, suff1),
		},
		path2: namePair{
			oldName: alias,
			newName: SuffixName(ctxMove, alias, suff2),
		},
	}

	// handle auto-numbering
	// note that we still queue copies for both files, whether or not we renamed
	// we also set these for ConflictLoserDelete in case there is no winner.
	if b.opt.ConflictLoser == ConflictLoserNumber || b.opt.ConflictLoser == ConflictLoserDelete {
		num := b.numerate(ctxMove, 1, file, alias)
		switch winningPath {
		case 1: // keep path1, rename path2
			r.path1.newName = r.path1.oldName
			r.path2.newName = SuffixName(ctxMove, r.path2.oldName, b.opt.ConflictSuffix2+fmt.Sprint(num))
		case 2: // keep path2, rename path1
			r.path1.newName = SuffixName(ctxMove, r.path1.oldName, b.opt.ConflictSuffix1+fmt.Sprint(num))
			r.path2.newName = r.path2.oldName
		default: // no winner, so rename both to different numbers (unless suffixes are already different)
			if b.opt.ConflictSuffix1 == b.opt.ConflictSuffix2 {
				r.path1.newName = SuffixName(ctxMove, r.path1.oldName, b.opt.ConflictSuffix1+fmt.Sprint(num))
				// let's just make sure num + 1 is available...
				num2 := b.numerate(ctxMove, num+1, file, alias)
				r.path2.newName = SuffixName(ctxMove, r.path2.oldName, b.opt.ConflictSuffix2+fmt.Sprint(num2))
			} else {
				// suffixes are different, so numerate independently
				num = b.numerateSingle(ctxMove, 1, file, alias, 1)
				r.path1.newName = SuffixName(ctxMove, r.path1.oldName, b.opt.ConflictSuffix1+fmt.Sprint(num))
				num = b.numerateSingle(ctxMove, 1, file, alias, 2)
				r.path2.newName = SuffixName(ctxMove, r.path2.oldName, b.opt.ConflictSuffix2+fmt.Sprint(num))
			}
		}
	}

	// when winningPath == 0 (no winner), we ignore settings and rename both, do not delete
	// note also that deletes and renames are mutually exclusive -- we never delete one path and rename the other.
	if b.opt.ConflictLoser == ConflictLoserDelete && winningPath == 1 {
		// delete 2, copy 1 to 2
		err = b.delete(ctxMove, r.path2, path2, path1, b.fs2, 2, 1, renameSkipped)
		if err != nil {
			return err
		}
		r.path2.newName = ""
		// copy the one that wasn't deleted
		b.indent("Path1", r.path1.oldName, "Queue copy to Path2")
		copy1to2.Add(r.path1.oldName)
	} else if b.opt.ConflictLoser == ConflictLoserDelete && winningPath == 2 {
		// delete 1, copy 2 to 1
		err = b.delete(ctxMove, r.path1, path1, path2, b.fs1, 1, 2, renameSkipped)
		if err != nil {
			return err
		}
		r.path1.newName = ""
		// copy the one that wasn't deleted
		b.indent("Path2", r.path2.oldName, "Queue copy to Path1")
		copy2to1.Add(r.path2.oldName)
	} else {
		err = b.rename(ctxMove, r.path1, path1, path2, b.fs1, 1, 2, winningPath, copy1to2, renameSkipped)
		if err != nil {
			return err
		}
		err = b.rename(ctxMove, r.path2, path2, path1, b.fs2, 2, 1, winningPath, copy2to1, renameSkipped)
		if err != nil {
			return err
		}
	}

	b.renames[r.path1.oldName] = r // note map index is path1's oldName, which may be different from path2 if aliases
	return nil
}

// SuffixName adds the current --conflict-suffix to the remote, obeying
// --suffix-keep-extension if set
// It is a close cousin of operations.SuffixName, but we don't want to
// use ci.Suffix for this because it might be used for --backup-dir.
func SuffixName(ctx context.Context, remote, suffix string) string {
	if suffix == "" {
		return remote
	}
	ci := fs.GetConfig(ctx)
	if ci.SuffixKeepExtension {
		var (
			base  = remote
			exts  = ""
			first = true
			ext   = path.Ext(remote)
		)
		for ext != "" {
			// Look up second and subsequent extensions in mime types.
			// If they aren't found then don't keep it as an extension.
			if !first && mime.TypeByExtension(ext) == "" {
				break
			}
			base = base[:len(base)-len(ext)]
			exts = ext + exts
			first = false
			ext = path.Ext(base)
		}
		return base + suffix + exts
	}
	return remote + suffix
}
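
A standalone sketch of the extension-preserving branch, with the config lookup replaced by a plain bool (an assumption made to keep the example self-contained):

package main

import (
	"fmt"
	"mime"
	"path"
)

// suffixName inserts suffix before the extension(s) when keepExtension is set,
// otherwise appends it at the end.
func suffixName(remote, suffix string, keepExtension bool) string {
	if suffix == "" {
		return remote
	}
	if !keepExtension {
		return remote + suffix
	}
	base, exts, first := remote, "", true
	for ext := path.Ext(base); ext != ""; ext = path.Ext(base) {
		// Only treat second and subsequent extensions as real if mime knows them.
		if !first && mime.TypeByExtension(ext) == "" {
			break
		}
		base = base[:len(base)-len(ext)]
		exts = ext + exts
		first = false
	}
	return base + suffix + exts
}

func main() {
	fmt.Println(suffixName("file1.txt", ".conflict1", true))      // file1.conflict1.txt
	fmt.Println(suffixName("index.html.gz", ".conflict1", true))  // index.conflict1.html.gz
	fmt.Println(suffixName("index.html.gz", ".conflict1", false)) // index.html.gz.conflict1
}
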

// NotEmpty checks whether set is not empty
func (r renames) NotEmpty() bool {
	return len(r) > 0
}

func (ri *renamesInfo) getNames(is1to2 bool) (srcOldName, srcNewName, dstOldName, dstNewName string) {
	if is1to2 {
		return ri.path1.oldName, ri.path1.newName, ri.path2.oldName, ri.path2.newName
	}
	return ri.path2.oldName, ri.path2.newName, ri.path1.oldName, ri.path1.newName
}

// work out the lowest number that neither side has, return it for suffix
func (b *bisyncRun) numerate(ctx context.Context, startnum int, file, alias string) int {
	for i := startnum; i < math.MaxInt; i++ {
		iStr := fmt.Sprint(i)
		if !ls1.has(SuffixName(ctx, file, b.opt.ConflictSuffix1+iStr)) &&
			!ls1.has(SuffixName(ctx, alias, b.opt.ConflictSuffix1+iStr)) &&
			!ls2.has(SuffixName(ctx, file, b.opt.ConflictSuffix2+iStr)) &&
			!ls2.has(SuffixName(ctx, alias, b.opt.ConflictSuffix2+iStr)) {
			// make sure it still holds true with suffixes switched (it should)
			if !ls1.has(SuffixName(ctx, file, b.opt.ConflictSuffix2+iStr)) &&
				!ls1.has(SuffixName(ctx, alias, b.opt.ConflictSuffix2+iStr)) &&
				!ls2.has(SuffixName(ctx, file, b.opt.ConflictSuffix1+iStr)) &&
				!ls2.has(SuffixName(ctx, alias, b.opt.ConflictSuffix1+iStr)) {
				fs.Debugf(file, "The first available suffix is: %s", iStr)
				return i
			}
		}
	}
	return 0 // not really possible, as no one has 9223372036854775807 conflicts, and if they do, they have bigger problems
}

// like numerate, but consider only one side's suffix (for when suffixes are different)
func (b *bisyncRun) numerateSingle(ctx context.Context, startnum int, file, alias string, path int) int {
	lsA, lsB := ls1, ls2
	suffix := b.opt.ConflictSuffix1
	if path == 2 {
		lsA, lsB = ls2, ls1
		suffix = b.opt.ConflictSuffix2
	}
	for i := startnum; i < math.MaxInt; i++ {
		iStr := fmt.Sprint(i)
		if !lsA.has(SuffixName(ctx, file, suffix+iStr)) &&
			!lsA.has(SuffixName(ctx, alias, suffix+iStr)) &&
			!lsB.has(SuffixName(ctx, file, suffix+iStr)) &&
			!lsB.has(SuffixName(ctx, alias, suffix+iStr)) {
			fs.Debugf(file, "The first available suffix is: %s", iStr)
			return i
		}
	}
	return 0 // not really possible, as no one has 9223372036854775807 conflicts, and if they do, they have bigger problems
}
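
Both numerators boil down to a lowest-free-integer search over the listings. A minimal sketch against a plain string set (the map stands in for the ls1/ls2 listings; names are illustrative):

package main

import "fmt"

// numerate returns the lowest i such that file+suffix+i is not yet taken.
func numerate(taken map[string]bool, file, suffix string) int {
	for i := 1; ; i++ {
		if !taken[fmt.Sprintf("%s%s%d", file, suffix, i)] {
			return i
		}
	}
}

func main() {
	taken := map[string]bool{
		"file5.txt.conflict1": true,
		"file5.txt.conflict2": true,
	}
	fmt.Println(numerate(taken, "file5.txt", ".conflict")) // prints 3
}
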

func (b *bisyncRun) rename(ctx context.Context, thisNamePair namePair, thisPath, thatPath string, thisFs fs.Fs, thisPathNum, thatPathNum, winningPath int, q, renameSkipped *bilib.Names) error {
	if winningPath == thisPathNum {
		b.indent(fmt.Sprintf("!Path%d", thisPathNum), thisPath+thisNamePair.newName, fmt.Sprintf("Not renaming Path%d copy, as it was determined the winner", thisPathNum))
	} else {
		skip := operations.SkipDestructive(ctx, thisNamePair.oldName, "rename")
		if !skip {
			b.indent(fmt.Sprintf("!Path%d", thisPathNum), thisPath+thisNamePair.newName, fmt.Sprintf("Renaming Path%d copy", thisPathNum))
			ctx = b.setBackupDir(ctx, thisPathNum) // in case already a file with new name
			if err = operations.MoveFile(ctx, thisFs, thisFs, thisNamePair.newName, thisNamePair.oldName); err != nil {
				err = fmt.Errorf("%s rename failed for %s: %w", thisPath, thisPath+thisNamePair.oldName, err)
				b.critical = true
				return err
			}
		} else {
			renameSkipped.Add(thisNamePair.oldName) // (due to dry-run, not equality)
		}
	}
	b.indent(fmt.Sprintf("!Path%d", thisPathNum), thatPath+thisNamePair.newName, fmt.Sprintf("Queue copy to Path%d", thatPathNum))
	q.Add(thisNamePair.newName)
	return nil
}

func (b *bisyncRun) delete(ctx context.Context, thisNamePair namePair, thisPath, thatPath string, thisFs fs.Fs, thisPathNum, thatPathNum int, renameSkipped *bilib.Names) error {
	skip := operations.SkipDestructive(ctx, thisNamePair.oldName, "delete")
	if !skip {
		b.indent(fmt.Sprintf("!Path%d", thisPathNum), thisPath+thisNamePair.oldName, fmt.Sprintf("Deleting Path%d copy", thisPathNum))
		ctx = b.setBackupDir(ctx, thisPathNum)
		ci := fs.GetConfig(ctx)
		var backupDir fs.Fs
		if ci.BackupDir != "" {
			backupDir, err = operations.BackupDir(ctx, thisFs, thisFs, thisNamePair.oldName)
			if err != nil {
				b.critical = true
				return err
			}
		}
		obj, err := thisFs.NewObject(ctx, thisNamePair.oldName)
		if err != nil {
			b.critical = true
			return err
		}
		if err = operations.DeleteFileWithBackupDir(ctx, obj, backupDir); err != nil {
			err = fmt.Errorf("%s delete failed for %s: %w", thisPath, thisPath+thisNamePair.oldName, err)
			b.critical = true
			return err
		}
	} else {
		renameSkipped.Add(thisNamePair.oldName) // (due to dry-run, not equality)
	}
	return nil
}

func (b *bisyncRun) conflictWinner(ds1, ds2 *deltaSet, remote1, remote2 string) int {
	switch b.opt.ConflictResolve {
	case PreferPath1:
		return 1
	case PreferPath2:
		return 2
	case PreferNewer, PreferOlder:
		t1, t2 := ds1.time[remote1], ds2.time[remote2]
		return b.resolveNewerOlder(t1, t2, remote1, remote2, b.opt.ConflictResolve)
	case PreferLarger, PreferSmaller:
		s1, s2 := ds1.size[remote1], ds2.size[remote2]
		return b.resolveLargerSmaller(s1, s2, remote1, remote2, b.opt.ConflictResolve)
	default:
		return 0
	}
}

// returns the winning path number, or 0 if winner can't be determined
func (b *bisyncRun) resolveNewerOlder(t1, t2 time.Time, remote1, remote2 string, prefer Prefer) int {
	if fs.GetModifyWindow(b.octx, b.fs1, b.fs2) == fs.ModTimeNotSupported {
		fs.Infof(remote1, "Winner cannot be determined as at least one path lacks modtime support.")
		return 0
	}
	if t1.IsZero() || t2.IsZero() {
		fs.Infof(remote1, "Winner cannot be determined as at least one modtime is missing. Path1: %v, Path2: %v", t1, t2)
		return 0
	}
	if t1.After(t2) {
		if prefer == PreferNewer {
			fs.Infof(remote1, "Path1 is newer. Path1: %v, Path2: %v, Difference: %s", t1, t2, t1.Sub(t2))
			return 1
		} else if prefer == PreferOlder {
			fs.Infof(remote1, "Path2 is older. Path1: %v, Path2: %v, Difference: %s", t1, t2, t1.Sub(t2))
			return 2
		}
	} else if t1.Before(t2) {
		if prefer == PreferNewer {
			fs.Infof(remote1, "Path2 is newer. Path1: %v, Path2: %v, Difference: %s", t1, t2, t2.Sub(t1))
			return 2
		} else if prefer == PreferOlder {
			fs.Infof(remote1, "Path1 is older. Path1: %v, Path2: %v, Difference: %s", t1, t2, t2.Sub(t1))
			return 1
		}
	}
	if t1.Equal(t2) {
		fs.Infof(remote1, "Winner cannot be determined as times are equal. Path1: %v, Path2: %v, Difference: %s", t1, t2, t2.Sub(t1))
		return 0
	}
	fs.Errorf(remote1, "Winner cannot be determined. Path1: %v, Path2: %v", t1, t2) // shouldn't happen unless prefer is of wrong type
	return 0
}

// returns the winning path number, or 0 if winner can't be determined
func (b *bisyncRun) resolveLargerSmaller(s1, s2 int64, remote1, remote2 string, prefer Prefer) int {
	if s1 < 0 || s2 < 0 {
		fs.Infof(remote1, "Winner cannot be determined as at least one size is unknown. Path1: %v, Path2: %v", s1, s2)
		return 0
	}
	if s1 > s2 {
		if prefer == PreferLarger {
			fs.Infof(remote1, "Path1 is larger. Path1: %v, Path2: %v, Difference: %v", s1, s2, s1-s2)
			return 1
		} else if prefer == PreferSmaller {
			fs.Infof(remote1, "Path2 is smaller. Path1: %v, Path2: %v, Difference: %v", s1, s2, s1-s2)
			return 2
		}
	} else if s1 < s2 {
		if prefer == PreferLarger {
			fs.Infof(remote1, "Path2 is larger. Path1: %v, Path2: %v, Difference: %v", s1, s2, s2-s1)
			return 2
		} else if prefer == PreferSmaller {
			fs.Infof(remote1, "Path1 is smaller. Path1: %v, Path2: %v, Difference: %v", s1, s2, s2-s1)
			return 1
		}
	}
	if s1 == s2 {
		fs.Infof(remote1, "Winner cannot be determined as sizes are equal. Path1: %v, Path2: %v, Difference: %v", s1, s2, s1-s2)
		return 0
	}
	fs.Errorf(remote1, "Winner cannot be determined. Path1: %v, Path2: %v", s1, s2) // shouldn't happen unless prefer is of wrong type
	return 0
}
@@ -1,226 +0,0 @@
package bisync

import (
	"context"
	"os"
	"path/filepath"

	"github.com/rclone/rclone/cmd/bisync/bilib"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/filter"
	"github.com/rclone/rclone/lib/terminal"
)

// for backward compatibility, --resync is now equivalent to --resync-mode path1
// and either flag is sufficient without the other.
func (b *bisyncRun) setResyncDefaults() {
	if b.opt.Resync && b.opt.ResyncMode == PreferNone {
		fs.Debugf(nil, Color(terminal.Dim, "defaulting to --resync-mode path1 as --resync is set"))
		b.opt.ResyncMode = PreferPath1
	}
	if b.opt.ResyncMode != PreferNone {
		b.opt.Resync = true
		Opt.Resync = true // shouldn't be using this one, but set to be safe
	}

	// checks and warnings
	if (b.opt.ResyncMode == PreferNewer || b.opt.ResyncMode == PreferOlder) && (b.fs1.Precision() == fs.ModTimeNotSupported || b.fs2.Precision() == fs.ModTimeNotSupported) {
		fs.Logf(nil, Color(terminal.YellowFg, "WARNING: ignoring --resync-mode %s as at least one remote does not support modtimes."), b.opt.ResyncMode.String())
		b.opt.ResyncMode = PreferPath1
	} else if (b.opt.ResyncMode == PreferNewer || b.opt.ResyncMode == PreferOlder) && !b.opt.Compare.Modtime {
		fs.Logf(nil, Color(terminal.YellowFg, "WARNING: ignoring --resync-mode %s as --compare does not include modtime."), b.opt.ResyncMode.String())
		b.opt.ResyncMode = PreferPath1
	}
	if (b.opt.ResyncMode == PreferLarger || b.opt.ResyncMode == PreferSmaller) && !b.opt.Compare.Size {
		fs.Logf(nil, Color(terminal.YellowFg, "WARNING: ignoring --resync-mode %s as --compare does not include size."), b.opt.ResyncMode.String())
		b.opt.ResyncMode = PreferPath1
	}
}

// resync implements the --resync mode.
// It will generate path1 and path2 listings,
// copy any unique files to the opposite path,
// and resolve any differing files according to the --resync-mode.
func (b *bisyncRun) resync(octx, fctx context.Context) error {
	fs.Infof(nil, "Copying Path2 files to Path1")

	// Save blank filelists (will be filled from sync results)
	var ls1 = newFileList()
	var ls2 = newFileList()
	err = ls1.save(fctx, b.newListing1)
	if err != nil {
		b.handleErr(ls1, "error saving ls1 from resync", err, true, true)
		b.abort = true
	}
	err = ls2.save(fctx, b.newListing2)
	if err != nil {
		b.handleErr(ls2, "error saving ls2 from resync", err, true, true)
		b.abort = true
	}

	// Check access health on the Path1 and Path2 filesystems
	// enforce even though this is --resync
	if b.opt.CheckAccess {
		fs.Infof(nil, "Checking access health")

		filesNow1, filesNow2, err := b.findCheckFiles(fctx)
		if err != nil {
			b.critical = true
			b.retryable = true
			return err
		}

		ds1 := &deltaSet{
			checkFiles: bilib.Names{},
		}

		ds2 := &deltaSet{
			checkFiles: bilib.Names{},
		}

		for _, file := range filesNow1.list {
			if filepath.Base(file) == b.opt.CheckFilename {
				ds1.checkFiles.Add(file)
			}
		}

		for _, file := range filesNow2.list {
			if filepath.Base(file) == b.opt.CheckFilename {
				ds2.checkFiles.Add(file)
			}
		}

		err = b.checkAccess(ds1.checkFiles, ds2.checkFiles)
		if err != nil {
			b.critical = true
			b.retryable = true
			return err
		}
	}

	var results2to1 []Results
	var results1to2 []Results
	queues := queues{}

	b.indent("Path2", "Path1", "Resync is copying files to")
	ctxRun := b.opt.setDryRun(fctx)
	// fctx has our extra filters added!
	ctxSync, filterSync := filter.AddConfig(ctxRun)
	if filterSync.Opt.MinSize == -1 {
		fs.Debugf(nil, "filterSync.Opt.MinSize: %v", filterSync.Opt.MinSize)
	}
	b.resyncIs1to2 = false
	ctxSync = b.setResyncConfig(ctxSync)
	ctxSync = b.setBackupDir(ctxSync, 1)
	// 2 to 1
	if results2to1, err = b.resyncDir(ctxSync, b.fs2, b.fs1); err != nil {
		b.critical = true
		return err
	}

	b.indent("Path1", "Path2", "Resync is copying files to")
	b.resyncIs1to2 = true
	ctxSync = b.setResyncConfig(ctxSync)
	ctxSync = b.setBackupDir(ctxSync, 2)
	// 1 to 2
	if results1to2, err = b.resyncDir(ctxSync, b.fs1, b.fs2); err != nil {
		b.critical = true
		return err
	}

	fs.Infof(nil, "Resync updating listings")
	b.saveOldListings() // may not exist, as this is --resync
	b.replaceCurrentListings()

	resultsToQueue := func(results []Results) bilib.Names {
		names := bilib.Names{}
		for _, result := range results {
			if result.Name != "" &&
				(result.Flags != "d" || b.opt.CreateEmptySrcDirs) &&
				result.IsSrc && result.Src != "" &&
				(result.Winner.Err == nil || result.Flags == "d") {
				names.Add(result.Name)
			}
		}
		return names
	}

	// resync 2to1
	queues.copy2to1 = resultsToQueue(results2to1)
	if err = b.modifyListing(fctx, b.fs2, b.fs1, results2to1, queues, false); err != nil {
		b.critical = true
		return err
	}

	// resync 1to2
	queues.copy1to2 = resultsToQueue(results1to2)
	if err = b.modifyListing(fctx, b.fs1, b.fs2, results1to2, queues, true); err != nil {
		b.critical = true
		return err
	}

	if b.opt.CheckSync == CheckSyncTrue && !b.opt.DryRun {
		path1 := bilib.FsPath(b.fs1)
		path2 := bilib.FsPath(b.fs2)
		fs.Infof(nil, "Validating listings for Path1 %s vs Path2 %s", quotePath(path1), quotePath(path2))
		if err := b.checkSync(b.listing1, b.listing2); err != nil {
			b.critical = true
			return err
		}
	}

	if !b.opt.NoCleanup {
		_ = os.Remove(b.newListing1)
		_ = os.Remove(b.newListing2)
	}
	return nil
}

/*
	--resync-mode implementation:
	PreferPath1: set ci.IgnoreExisting true, then false
	PreferPath2: set ci.IgnoreExisting false, then true
	PreferNewer: set ci.UpdateOlder in both directions
	PreferOlder: override EqualFn to implement custom logic
	PreferLarger: override EqualFn to implement custom logic
	PreferSmaller: override EqualFn to implement custom logic
*/
func (b *bisyncRun) setResyncConfig(ctx context.Context) context.Context {
	ci := fs.GetConfig(ctx)
	switch b.opt.ResyncMode {
	case PreferPath1:
		if !b.resyncIs1to2 { // 2to1 (remember 2to1 is first)
			ci.IgnoreExisting = true
		} else { // 1to2
			ci.IgnoreExisting = false
		}
	case PreferPath2:
		if !b.resyncIs1to2 { // 2to1 (remember 2to1 is first)
			ci.IgnoreExisting = false
		} else { // 1to2
			ci.IgnoreExisting = true
		}
	case PreferNewer:
		ci.UpdateOlder = true
	}
	// for older, larger, and smaller, we return it unchanged and handle it later
	return ctx
}
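
The IgnoreExisting juggling above is easier to read as a truth table. A standalone sketch, with strings and booleans standing in for the rclone config context (illustrative only):

package main

import "fmt"

// ignoreExisting reports whether a resync copy pass should skip files that
// already exist on the destination, given the mode and the pass direction.
func ignoreExisting(mode string, is1to2 bool) bool {
	switch mode {
	case "path1":
		return !is1to2 // protect Path1 during the 2to1 pass (which runs first)
	case "path2":
		return is1to2 // protect Path2 during the 1to2 pass
	}
	return false
}

func main() {
	fmt.Println(ignoreExisting("path1", false)) // true: 2to1 pass won't overwrite Path1
	fmt.Println(ignoreExisting("path1", true))  // false: 1to2 pass copies everything
}
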

func (b *bisyncRun) resyncWhichIsWhich(src, dst fs.ObjectInfo) (path1, path2 fs.ObjectInfo) {
	if b.resyncIs1to2 {
		return src, dst
	}
	return dst, src
}

// equal in this context really means "don't transfer", so we should
// return true if the files are actually equal or if dest is winner,
// false if src is winner
// When we can't determine, we end up running the normal Equal() to tie-break (due to our differ functions).
func (b *bisyncRun) resyncWinningPathToEqual(winningPath int) bool {
	if b.resyncIs1to2 {
		return winningPath != 1
	}
	return winningPath != 2
}
@@ -1,9 +1,9 @@
# bisync listing v1 from test
- 109 - - 2004-01-02T00:00:00.000000000+0000 "RCLONE_TEST"
- 0 - - 2004-01-02T00:00:00.000000000+0000 "file1.copy1.txt"
- 0 - - 2004-01-02T00:00:00.000000000+0000 "file1.copy2.txt"
- 0 - - 2004-01-02T00:00:00.000000000+0000 "file1.copy3.txt"
- 0 - - 2004-01-02T00:00:00.000000000+0000 "file1.copy4.txt"
- 0 - - 2004-01-02T00:00:00.000000000+0000 "file1.copy5.txt"
- 0 - - 2004-01-02T00:00:00.000000000+0000 "file1.txt"
- 0 - - 2004-01-02T00:00:00.000000000+0000 "subdir/file20.txt"
- 109 md5:294d25b294ff26a5243dba914ac3fbf7 - 2004-01-02T00:00:00.000000000+0000 "RCLONE_TEST"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "file1.copy1.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "file1.copy2.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "file1.copy3.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "file1.copy4.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "file1.copy5.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "file1.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "subdir/file20.txt"

@@ -1,9 +1,9 @@
# bisync listing v1 from test
- 109 - - 2004-01-02T00:00:00.000000000+0000 "RCLONE_TEST"
- 0 - - 2004-01-02T00:00:00.000000000+0000 "file1.copy1.txt"
- 0 - - 2004-01-02T00:00:00.000000000+0000 "file1.copy2.txt"
- 0 - - 2004-01-02T00:00:00.000000000+0000 "file1.copy3.txt"
- 0 - - 2004-01-02T00:00:00.000000000+0000 "file1.copy4.txt"
- 0 - - 2004-01-02T00:00:00.000000000+0000 "file1.copy5.txt"
- 0 - - 2004-01-02T00:00:00.000000000+0000 "file1.txt"
- 0 - - 2004-01-02T00:00:00.000000000+0000 "subdir/file20.txt"
- 109 md5:294d25b294ff26a5243dba914ac3fbf7 - 2004-01-02T00:00:00.000000000+0000 "RCLONE_TEST"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "file1.copy1.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "file1.copy2.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "file1.copy3.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "file1.copy4.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "file1.copy5.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "file1.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "subdir/file20.txt"

@@ -1,9 +0,0 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 0 - - 2005-01-02T00:00:00.000000000+0000 "file1.copy1.txt"
- 0 - - 2005-01-02T00:00:00.000000000+0000 "file1.copy2.txt"
- 0 - - 2005-01-02T00:00:00.000000000+0000 "file1.copy3.txt"
- 0 - - 2005-01-02T00:00:00.000000000+0000 "file1.copy4.txt"
- 0 - - 2005-01-02T00:00:00.000000000+0000 "file1.copy5.txt"
- 0 - - 2005-01-02T00:00:00.000000000+0000 "file1.txt"
- 0 - - 2005-01-02T00:00:00.000000000+0000 "subdir/file20.txt"

@@ -1,9 +1,9 @@
# bisync listing v1 from test
- 109 - - 2004-01-02T00:00:00.000000000+0000 "RCLONE_TEST"
- 0 - - 2004-01-02T00:00:00.000000000+0000 "file1.copy1.txt"
- 0 - - 2004-01-02T00:00:00.000000000+0000 "file1.copy2.txt"
- 0 - - 2004-01-02T00:00:00.000000000+0000 "file1.copy3.txt"
- 0 - - 2004-01-02T00:00:00.000000000+0000 "file1.copy4.txt"
- 0 - - 2004-01-02T00:00:00.000000000+0000 "file1.copy5.txt"
- 0 - - 2004-01-02T00:00:00.000000000+0000 "file1.txt"
- 0 - - 2004-01-02T00:00:00.000000000+0000 "subdir/file20.txt"
- 109 md5:294d25b294ff26a5243dba914ac3fbf7 - 2004-01-02T00:00:00.000000000+0000 "RCLONE_TEST"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "file1.copy1.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "file1.copy2.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "file1.copy3.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "file1.copy4.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "file1.copy5.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "file1.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "subdir/file20.txt"

@@ -1,9 +1,9 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 0 - - 2005-01-02T00:00:00.000000000+0000 "file1.copy1.txt"
- 0 - - 2005-01-02T00:00:00.000000000+0000 "file1.copy2.txt"
- 0 - - 2005-01-02T00:00:00.000000000+0000 "file1.copy3.txt"
- 0 - - 2005-01-02T00:00:00.000000000+0000 "file1.copy4.txt"
- 0 - - 2005-01-02T00:00:00.000000000+0000 "file1.copy5.txt"
- 0 - - 2005-01-02T00:00:00.000000000+0000 "file1.txt"
- 0 - - 2005-01-02T00:00:00.000000000+0000 "subdir/file20.txt"
- 109 md5:294d25b294ff26a5243dba914ac3fbf7 - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2005-01-02T00:00:00.000000000+0000 "file1.copy1.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2005-01-02T00:00:00.000000000+0000 "file1.copy2.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2005-01-02T00:00:00.000000000+0000 "file1.copy3.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2005-01-02T00:00:00.000000000+0000 "file1.copy4.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2005-01-02T00:00:00.000000000+0000 "file1.copy5.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2005-01-02T00:00:00.000000000+0000 "file1.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2005-01-02T00:00:00.000000000+0000 "subdir/file20.txt"

@@ -1,9 +0,0 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 0 - - 2005-01-02T00:00:00.000000000+0000 "file1.copy1.txt"
- 0 - - 2005-01-02T00:00:00.000000000+0000 "file1.copy2.txt"
- 0 - - 2005-01-02T00:00:00.000000000+0000 "file1.copy3.txt"
- 0 - - 2005-01-02T00:00:00.000000000+0000 "file1.copy4.txt"
- 0 - - 2005-01-02T00:00:00.000000000+0000 "file1.copy5.txt"
- 0 - - 2005-01-02T00:00:00.000000000+0000 "file1.txt"
- 0 - - 2005-01-02T00:00:00.000000000+0000 "subdir/file20.txt"
@@ -1,138 +1,90 @@
[36m(01) :[0m [34mtest all-changed[0m
(01) : test all-changed

[36m(02) :[0m [34mtest initial bisync[0m
[36m(03) :[0m [34mbisync resync[0m
INFO : [2mSetting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.[0m
INFO : Bisyncing with Comparison Settings:
{
	"Modtime": true,
	"Size": true,
	"Checksum": false,
	"NoSlowHash": false,
	"SlowHashSyncOnly": false,
	"DownloadHash": false
}
(02) : test initial bisync
(03) : bisync resync
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : Copying unique Path2 files to Path1
INFO : Resynching Path1 to Path2
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
INFO : Bisync successful

[36m(04) :[0m [34mtest change timestamp on all files except RCLONE_TEST[0m
[36m(05) :[0m [34mtouch-glob 2005-01-02 {path1/} file*[0m
[36m(06) :[0m [34mtouch-glob 2005-01-02 {path1/}subdir file*[0m
(04) : test change timestamp on all files except RCLONE_TEST
(05) : touch-glob 2005-01-02 {path1/} file*
(06) : touch-glob 2005-01-02 {path1/}subdir file*

[36m(07) :[0m [34mtest sync should pass[0m
[36m(08) :[0m [34mbisync[0m
INFO : [2mSetting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.[0m
INFO : Bisyncing with Comparison Settings:
{
	"Modtime": true,
	"Size": true,
	"Checksum": false,
	"NoSlowHash": false,
	"SlowHashSyncOnly": false,
	"DownloadHash": false
}
(07) : test sync should pass
(08) : bisync
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Building Path1 and Path2 listings
INFO : Path1 checking for diffs
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (newer)[0m[0m[0m - [36mfile1.copy1.txt[0m
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (newer)[0m[0m[0m - [36mfile1.copy2.txt[0m
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (newer)[0m[0m[0m - [36mfile1.copy3.txt[0m
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (newer)[0m[0m[0m - [36mfile1.copy4.txt[0m
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (newer)[0m[0m[0m - [36mfile1.copy5.txt[0m
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (newer)[0m[0m[0m - [36mfile1.txt[0m
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (newer)[0m[0m[0m - [36msubdir/file20.txt[0m
INFO : Path1: 7 changes: [32m 0 new[0m, [33m 7 modified[0m, [31m 0 deleted[0m
INFO : ([33mModified[0m: [36m 7 newer[0m, [34m 0 older[0m)
INFO : - Path1 File is newer - file1.copy1.txt
INFO : - Path1 File is newer - file1.copy2.txt
INFO : - Path1 File is newer - file1.copy3.txt
INFO : - Path1 File is newer - file1.copy4.txt
INFO : - Path1 File is newer - file1.copy5.txt
INFO : - Path1 File is newer - file1.txt
INFO : - Path1 File is newer - subdir/file20.txt
INFO : Path1: 7 changes: 0 new, 7 newer, 0 older, 0 deleted
INFO : Path2 checking for diffs
INFO : Applying changes
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}file1.copy1.txt[0m
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}file1.copy2.txt[0m
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}file1.copy3.txt[0m
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}file1.copy4.txt[0m
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}file1.copy5.txt[0m
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}file1.txt[0m
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}subdir/file20.txt[0m
INFO : - [36mPath1[0m [35mDo queued copies to[0m - [36mPath2[0m
INFO : - Path1 Queue copy to Path2 - {path2/}file1.copy1.txt
INFO : - Path1 Queue copy to Path2 - {path2/}file1.copy2.txt
INFO : - Path1 Queue copy to Path2 - {path2/}file1.copy3.txt
INFO : - Path1 Queue copy to Path2 - {path2/}file1.copy4.txt
INFO : - Path1 Queue copy to Path2 - {path2/}file1.copy5.txt
INFO : - Path1 Queue copy to Path2 - {path2/}file1.txt
INFO : - Path1 Queue copy to Path2 - {path2/}subdir/file20.txt
INFO : - Path1 Do queued copies to - Path2
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
INFO : Bisync successful

[36m(09) :[0m [34mtest change timestamp on all files including RCLONE_TEST[0m
[36m(10) :[0m [34mtouch-glob 2004-01-02 {path1/} *[0m
[36m(11) :[0m [34mtouch-glob 2004-01-02 {path1/}subdir *[0m
(09) : test change timestamp on all files including RCLONE_TEST
(10) : touch-glob 2004-01-02 {path1/} *
(11) : touch-glob 2004-01-02 {path1/}subdir *

[36m(12) :[0m [34mtest sync should fail[0m
[36m(13) :[0m [34mbisync[0m
INFO : [2mSetting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.[0m
INFO : Bisyncing with Comparison Settings:
{
	"Modtime": true,
	"Size": true,
	"Checksum": false,
	"NoSlowHash": false,
	"SlowHashSyncOnly": false,
	"DownloadHash": false
}
(12) : test sync should fail
(13) : bisync
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Building Path1 and Path2 listings
INFO : Path1 checking for diffs
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (newer)[0m[0m[0m - [36mRCLONE_TEST[0m
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (older)[0m[0m[0m - [36mfile1.copy1.txt[0m
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (older)[0m[0m[0m - [36mfile1.copy2.txt[0m
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (older)[0m[0m[0m - [36mfile1.copy3.txt[0m
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (older)[0m[0m[0m - [36mfile1.copy4.txt[0m
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (older)[0m[0m[0m - [36mfile1.copy5.txt[0m
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (older)[0m[0m[0m - [36mfile1.txt[0m
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (older)[0m[0m[0m - [36msubdir/file20.txt[0m
INFO : Path1: 8 changes: [32m 0 new[0m, [33m 8 modified[0m, [31m 0 deleted[0m
INFO : ([33mModified[0m: [36m 1 newer[0m, [34m 7 older[0m)
INFO : - Path1 File is OLDER - file1.copy1.txt
INFO : - Path1 File is OLDER - file1.copy2.txt
INFO : - Path1 File is OLDER - file1.copy3.txt
INFO : - Path1 File is OLDER - file1.copy4.txt
INFO : - Path1 File is OLDER - file1.copy5.txt
INFO : - Path1 File is OLDER - file1.txt
INFO : - Path1 File is OLDER - subdir/file20.txt
INFO : - Path1 File is newer - RCLONE_TEST
INFO : Path1: 8 changes: 0 new, 1 newer, 7 older, 0 deleted
INFO : Path2 checking for diffs
ERROR : Safety abort: all files were changed on Path1 "{path1/}". Run with --force if desired.
NOTICE: [31mBisync aborted. Please try again.[0m
NOTICE: Bisync aborted. Please try again.
Bisync error: all files were changed

[36m(14) :[0m [34mtest sync with force should pass[0m
[36m(15) :[0m [34mbisync force[0m
INFO : [2mSetting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.[0m
INFO : Bisyncing with Comparison Settings:
{
	"Modtime": true,
	"Size": true,
	"Checksum": false,
	"NoSlowHash": false,
	"SlowHashSyncOnly": false,
	"DownloadHash": false
}
(14) : test sync with force should pass
(15) : bisync force
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Building Path1 and Path2 listings
INFO : Path1 checking for diffs
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (newer)[0m[0m[0m - [36mRCLONE_TEST[0m
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (older)[0m[0m[0m - [36mfile1.copy1.txt[0m
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (older)[0m[0m[0m - [36mfile1.copy2.txt[0m
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (older)[0m[0m[0m - [36mfile1.copy3.txt[0m
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (older)[0m[0m[0m - [36mfile1.copy4.txt[0m
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (older)[0m[0m[0m - [36mfile1.copy5.txt[0m
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (older)[0m[0m[0m - [36mfile1.txt[0m
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (older)[0m[0m[0m - [36msubdir/file20.txt[0m
INFO : Path1: 8 changes: [32m 0 new[0m, [33m 8 modified[0m, [31m 0 deleted[0m
INFO : ([33mModified[0m: [36m 1 newer[0m, [34m 7 older[0m)
INFO : - Path1 File is OLDER - file1.copy1.txt
INFO : - Path1 File is OLDER - file1.copy2.txt
INFO : - Path1 File is OLDER - file1.copy3.txt
INFO : - Path1 File is OLDER - file1.copy4.txt
INFO : - Path1 File is OLDER - file1.copy5.txt
INFO : - Path1 File is OLDER - file1.txt
INFO : - Path1 File is OLDER - subdir/file20.txt
INFO : - Path1 File is newer - RCLONE_TEST
INFO : Path1: 8 changes: 0 new, 1 newer, 7 older, 0 deleted
INFO : Path2 checking for diffs
INFO : Applying changes
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}RCLONE_TEST[0m
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}file1.copy1.txt[0m
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}file1.copy2.txt[0m
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}file1.copy3.txt[0m
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}file1.copy4.txt[0m
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}file1.copy5.txt[0m
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}file1.txt[0m
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}subdir/file20.txt[0m
INFO : - [36mPath1[0m [35mDo queued copies to[0m - [36mPath2[0m
INFO : - Path1 Queue copy to Path2 - {path2/}RCLONE_TEST
INFO : - Path1 Queue copy to Path2 - {path2/}file1.copy1.txt
INFO : - Path1 Queue copy to Path2 - {path2/}file1.copy2.txt
INFO : - Path1 Queue copy to Path2 - {path2/}file1.copy3.txt
INFO : - Path1 Queue copy to Path2 - {path2/}file1.copy4.txt
INFO : - Path1 Queue copy to Path2 - {path2/}file1.copy5.txt
INFO : - Path1 Queue copy to Path2 - {path2/}file1.txt
INFO : - Path1 Queue copy to Path2 - {path2/}subdir/file20.txt
INFO : - Path1 Do queued copies to - Path2
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
INFO : Bisync successful
@@ -1,5 +0,0 @@
"file11.txt"
"file2.txt"
"file4.txt"
"file5.txt.conflict1"
"file7.txt"

@@ -1,5 +0,0 @@
"file1.txt"
"file10.txt"
"file3.txt"
"file5.txt.conflict2"
"file6.txt"

@@ -1 +0,0 @@
"file3.txt"

@@ -1,10 +0,0 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file1.txt"
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file10.txt"
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file11.txt"
- 13 - - 2001-01-02T00:00:00.000000000+0000 "file2.txt"
- 39 - - 2001-03-04T00:00:00.000000000+0000 "file5.txt.conflict1"
- 39 - - 2001-01-02T00:00:00.000000000+0000 "file5.txt.conflict2"
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file6.txt"
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file7.txt"

@@ -1,10 +0,0 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file1.txt"
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file10.txt"
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file11.txt"
- 13 - - 2001-01-02T00:00:00.000000000+0000 "file2.txt"
- 39 - - 2001-03-04T00:00:00.000000000+0000 "file5.txt.conflict1"
- 39 - - 2001-01-02T00:00:00.000000000+0000 "file5.txt.conflict2"
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file6.txt"
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file7.txt"

@@ -1,10 +0,0 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file1.txt"
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file10.txt"
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file11.txt"
- 13 - - 2001-01-02T00:00:00.000000000+0000 "file2.txt"
- 39 - - 2001-03-04T00:00:00.000000000+0000 "file5.txt.conflict1"
- 39 - - 2001-01-02T00:00:00.000000000+0000 "file5.txt.conflict2"
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file6.txt"
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file7.txt"

@@ -1,10 +0,0 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file1.txt"
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file10.txt"
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file11.txt"
- 13 - - 2001-01-02T00:00:00.000000000+0000 "file2.txt"
- 39 - - 2001-03-04T00:00:00.000000000+0000 "file5.txt.conflict1"
- 39 - - 2001-01-02T00:00:00.000000000+0000 "file5.txt.conflict2"
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file6.txt"
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file7.txt"

@@ -1,10 +0,0 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file1.txt"
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file10.txt"
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file11.txt"
- 13 - - 2001-01-02T00:00:00.000000000+0000 "file2.txt"
- 39 - - 2001-03-04T00:00:00.000000000+0000 "file5.txt.conflict1"
- 39 - - 2001-01-02T00:00:00.000000000+0000 "file5.txt.conflict2"
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file6.txt"
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file7.txt"

@@ -1,10 +0,0 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file1.txt"
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file10.txt"
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file11.txt"
- 13 - - 2001-01-02T00:00:00.000000000+0000 "file2.txt"
- 39 - - 2001-03-04T00:00:00.000000000+0000 "file5.txt.conflict1"
- 39 - - 2001-01-02T00:00:00.000000000+0000 "file5.txt.conflict2"
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file6.txt"
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file7.txt"
@@ -1,154 +0,0 @@
(01) : test backupdir


(02) : test initial bisync
(03) : bisync resync backupdir1={workdir/}backupdirs/backupdir1 backupdir2={workdir/}backupdirs/backupdir2
INFO : Setting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.
INFO : Bisyncing with Comparison Settings:
{
  "Modtime": true,
  "Size": true,
  "Checksum": false,
  "NoSlowHash": false,
  "SlowHashSyncOnly": false,
  "DownloadHash": false
}
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : - Path1 Resync is copying files to - Path2
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

(04) : test make modifications on both paths
(05) : test new on path2 - file10
(06) : touch-copy 2001-01-02 {datadir/}file10.txt {path2/}

(07) : test newer on path2 - file1
(08) : touch-copy 2001-01-02 {datadir/}file1.txt {path2/}

(09) : test new on path1 - file11
(10) : touch-copy 2001-01-02 {datadir/}file11.txt {path1/}

(11) : test newer on path1 - file2
(12) : touch-copy 2001-01-02 {datadir/}file2.txt {path1/}

(13) : test deleted on path2 - file3
(14) : delete-file {path2/}file3.txt

(15) : test deleted on path1 - file4
(16) : delete-file {path1/}file4.txt

(17) : test deleted on both paths - file8
(18) : delete-file {path1/}file8.txt
(19) : delete-file {path2/}file8.txt

(20) : test changed on both paths - file5 (file5R, file5L)
(21) : touch-glob 2001-01-02 {datadir/} file5R.txt
(22) : copy-as {datadir/}file5R.txt {path2/} file5.txt
(23) : touch-glob 2001-03-04 {datadir/} file5L.txt
(24) : copy-as {datadir/}file5L.txt {path1/} file5.txt

(25) : test newer on path2 and deleted on path1 - file6
(26) : touch-copy 2001-01-02 {datadir/}file6.txt {path2/}
(27) : delete-file {path1/}file6.txt

(28) : test newer on path1 and deleted on path2 - file7
(29) : touch-copy 2001-01-02 {datadir/}file7.txt {path1/}
(30) : delete-file {path2/}file7.txt

(31) : test bisync run
(32) : bisync backupdir1={workdir/}backupdirs/backupdir1 backupdir2={workdir/}backupdirs/backupdir2
INFO : Setting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.
INFO : Bisyncing with Comparison Settings:
{
  "Modtime": true,
  "Size": true,
  "Checksum": false,
  "NoSlowHash": false,
  "SlowHashSyncOnly": false,
  "DownloadHash": false
}
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Building Path1 and Path2 listings
INFO : Path1 checking for diffs
INFO : - Path1 File changed: size (larger), time (newer) - file2.txt
INFO : - Path1 File was deleted - file4.txt
INFO : - Path1 File changed: size (larger), time (newer) - file5.txt
INFO : - Path1 File was deleted - file6.txt
INFO : - Path1 File changed: size (larger), time (newer) - file7.txt
INFO : - Path1 File was deleted - file8.txt
INFO : - Path1 File is new - file11.txt
INFO : Path1: 7 changes: 1 new, 3 modified, 3 deleted
INFO : (Modified: 3 newer, 0 older, 3 larger, 0 smaller)
INFO : Path2 checking for diffs
INFO : - Path2 File changed: size (larger), time (newer) - file1.txt
INFO : - Path2 File was deleted - file3.txt
INFO : - Path2 File changed: size (larger), time (newer) - file5.txt
INFO : - Path2 File changed: size (larger), time (newer) - file6.txt
INFO : - Path2 File was deleted - file7.txt
INFO : - Path2 File was deleted - file8.txt
INFO : - Path2 File is new - file10.txt
INFO : Path2: 7 changes: 1 new, 3 modified, 3 deleted
INFO : (Modified: 3 newer, 0 older, 3 larger, 0 smaller)
INFO : Applying changes
INFO : Checking potential conflicts...
ERROR : file5.txt: md5 differ
NOTICE: {path2String}: 1 differences found
NOTICE: {path2String}: 1 errors while checking
INFO : Finished checking the potential conflicts. 1 differences found
INFO : - Path1 Queue copy to Path2 - {path2/}file11.txt
INFO : - Path1 Queue copy to Path2 - {path2/}file2.txt
INFO : - Path2 Queue delete - {path2/}file4.txt
NOTICE: - WARNING New or changed in both paths - file5.txt
NOTICE: - Path1 Renaming Path1 copy - {path1/}file5.txt.conflict1
NOTICE: - Path1 Queue copy to Path2 - {path2/}file5.txt.conflict1
NOTICE: - Path2 Renaming Path2 copy - {path2/}file5.txt.conflict2
NOTICE: - Path2 Queue copy to Path1 - {path1/}file5.txt.conflict2
INFO : - Path2 Queue copy to Path1 - {path1/}file6.txt
INFO : - Path1 Queue copy to Path2 - {path2/}file7.txt
INFO : - Path2 Queue copy to Path1 - {path1/}file1.txt
INFO : - Path2 Queue copy to Path1 - {path1/}file10.txt
INFO : - Path1 Queue delete - {path1/}file3.txt
INFO : - Path2 Do queued copies to - Path1
INFO : - Path1 Do queued copies to - Path2
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
(33) : bisync backupdir1={workdir/}backupdirs/backupdir1 backupdir2={workdir/}backupdirs/backupdir2
INFO : Setting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.
INFO : Bisyncing with Comparison Settings:
{
  "Modtime": true,
  "Size": true,
  "Checksum": false,
  "NoSlowHash": false,
  "SlowHashSyncOnly": false,
  "DownloadHash": false
}
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Building Path1 and Path2 listings
INFO : Path1 checking for diffs
INFO : Path2 checking for diffs
INFO : No changes found
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

(34) : test overlapping path -- should fail
(35) : bisync backupdir1={path1/}subdir/backupdir1 backupdir2={path2/}subdir/backupdir2
INFO : Setting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.
INFO : Bisyncing with Comparison Settings:
{
  "Modtime": true,
  "Size": true,
  "Checksum": false,
  "NoSlowHash": false,
  "SlowHashSyncOnly": false,
  "DownloadHash": false
}
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
ERROR : Bisync critical error: destination and parameter to --backup-dir mustn't overlap
ERROR : Bisync aborted. Must run --resync to recover.
Bisync error: bisync aborted
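Steps (34)-(35) in the log above show the guard rail: placing the backup directories (backupdir1/backupdir2) inside the trees being synced aborts the run. The check is essentially path containment; the following Go sketch illustrates the idea only and is not rclone's actual implementation:

package main

import (
	"fmt"
	"strings"
)

// overlaps reports whether one path is equal to or nested inside the
// other, after normalising trailing slashes. Sketch of the
// "--backup-dir mustn't overlap" rule seen in the log above.
func overlaps(dest, backupDir string) bool {
	d := strings.TrimSuffix(dest, "/") + "/"
	b := strings.TrimSuffix(backupDir, "/") + "/"
	return strings.HasPrefix(b, d) || strings.HasPrefix(d, b)
}

func main() {
	fmt.Println(overlaps("/path1", "/path1/subdir/backupdir1"))       // true: aborts, as in step (35)
	fmt.Println(overlaps("/path1", "/workdir/backupdirs/backupdir1")) // false: allowed, as in step (32)
}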
@@ -1 +0,0 @@
This file is used for testing the health of rclone accesses to the local/remote file system. Do not delete.

@@ -1 +0,0 @@
This file is newer

@@ -1 +0,0 @@
This file is newer

@@ -1 +0,0 @@
This file is newer

@@ -1 +0,0 @@
Newer version

@@ -1 +0,0 @@
This file is newer and not equal to 5R

@@ -1 +0,0 @@
This file is newer and not equal to 5L

@@ -1 +0,0 @@
This file is newer

@@ -1 +0,0 @@
This file is newer
@@ -1,59 +0,0 @@
test backupdir
# Exercise all of the various file change scenarios
# - New on Path2 file10
# - Newer on Path2 file1
# - New on Path1 file11
# - Newer on Path1 file2
# - Deleted on Path2 file3
# - Deleted on Path1 file4
# - Changed on Path2 and on Path1 file5 (file5r, file5l)
# - Newer on Path2 and deleted on Path1 file6
# - Newer on Path1 and deleted on Path2 file7
# - Deleted on both paths file8

test initial bisync
bisync resync backupdir1={workdir/}backupdirs/backupdir1 backupdir2={workdir/}backupdirs/backupdir2

test make modifications on both paths
test new on path2 - file10
touch-copy 2001-01-02 {datadir/}file10.txt {path2/}

test newer on path2 - file1
touch-copy 2001-01-02 {datadir/}file1.txt {path2/}

test new on path1 - file11
touch-copy 2001-01-02 {datadir/}file11.txt {path1/}

test newer on path1 - file2
touch-copy 2001-01-02 {datadir/}file2.txt {path1/}

test deleted on path2 - file3
delete-file {path2/}file3.txt

test deleted on path1 - file4
delete-file {path1/}file4.txt

test deleted on both paths - file8
delete-file {path1/}file8.txt
delete-file {path2/}file8.txt

test changed on both paths - file5 (file5R, file5L)
touch-glob 2001-01-02 {datadir/} file5R.txt
copy-as {datadir/}file5R.txt {path2/} file5.txt
touch-glob 2001-03-04 {datadir/} file5L.txt
copy-as {datadir/}file5L.txt {path1/} file5.txt

test newer on path2 and deleted on path1 - file6
touch-copy 2001-01-02 {datadir/}file6.txt {path2/}
delete-file {path1/}file6.txt

test newer on path1 and deleted on path2 - file7
touch-copy 2001-01-02 {datadir/}file7.txt {path1/}
delete-file {path2/}file7.txt

test bisync run
bisync backupdir1={workdir/}backupdirs/backupdir1 backupdir2={workdir/}backupdirs/backupdir2
bisync backupdir1={workdir/}backupdirs/backupdir1 backupdir2={workdir/}backupdirs/backupdir2

test overlapping path -- should fail
bisync backupdir1={path1/}subdir/backupdir1 backupdir2={path2/}subdir/backupdir2
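The "changed on both paths" scenario in the deleted test script above is the one that produces the file5.txt.conflict1/file5.txt.conflict2 pair in the log: neither side wins, so each copy is renamed with a numbered .conflict suffix and propagated to the other side. A minimal Go sketch of that naming scheme, for illustration only (the suffix and numbering are read off the golden log above, not taken from rclone's source):

package main

import "fmt"

// conflictName returns the numbered conflict name used when the same
// file changed on both paths, e.g. "file5.txt" -> "file5.txt.conflict1".
func conflictName(name string, n int) string {
	return fmt.Sprintf("%s.conflict%d", name, n)
}

func main() {
	fmt.Println(conflictName("file5.txt", 1)) // Path1 copy, as renamed in the log
	fmt.Println(conflictName("file5.txt", 2)) // Path2 copy
}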
@@ -1,9 +1,9 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file1.copy1.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file1.copy2.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file1.copy3.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file1.copy4.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file1.copy5.txt"
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file1.txt"
- 19 - - 2001-01-02T00:00:00.000000000+0000 "subdir/file20.txt"
- 109 md5:294d25b294ff26a5243dba914ac3fbf7 - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2000-01-01T00:00:00.000000000+0000 "file1.copy1.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2000-01-01T00:00:00.000000000+0000 "file1.copy2.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2000-01-01T00:00:00.000000000+0000 "file1.copy3.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2000-01-01T00:00:00.000000000+0000 "file1.copy4.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2000-01-01T00:00:00.000000000+0000 "file1.copy5.txt"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file1.txt"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "subdir/file20.txt"
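In the updated listings above, every zero-byte file carries md5:d41d8cd98f00b204e9800998ecf8427e, which is the well-known MD5 digest of empty input. A quick Go check confirms it:

package main

import (
	"crypto/md5"
	"fmt"
)

func main() {
	sum := md5.Sum(nil)     // digest of zero bytes
	fmt.Printf("%x\n", sum) // prints d41d8cd98f00b204e9800998ecf8427e
}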
@@ -1,9 +1,9 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file1.copy1.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file1.copy2.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file1.copy3.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file1.copy4.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file1.copy5.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file1.txt"
- 19 - - 2001-01-02T00:00:00.000000000+0000 "subdir/file20.txt"
- 109 md5:294d25b294ff26a5243dba914ac3fbf7 - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2000-01-01T00:00:00.000000000+0000 "file1.copy1.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2000-01-01T00:00:00.000000000+0000 "file1.copy2.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2000-01-01T00:00:00.000000000+0000 "file1.copy3.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2000-01-01T00:00:00.000000000+0000 "file1.copy4.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2000-01-01T00:00:00.000000000+0000 "file1.copy5.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2000-01-01T00:00:00.000000000+0000 "file1.txt"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "subdir/file20.txt"
@@ -1,9 +0,0 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file1.copy1.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file1.copy2.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file1.copy3.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file1.copy4.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file1.copy5.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file1.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "subdir/file20.txt"
@@ -1,9 +1,9 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file1.copy1.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file1.copy2.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file1.copy3.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file1.copy4.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file1.copy5.txt"
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file1.txt"
- 19 - - 2001-01-02T00:00:00.000000000+0000 "subdir/file20.txt"
- 109 md5:294d25b294ff26a5243dba914ac3fbf7 - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2000-01-01T00:00:00.000000000+0000 "file1.copy1.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2000-01-01T00:00:00.000000000+0000 "file1.copy2.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2000-01-01T00:00:00.000000000+0000 "file1.copy3.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2000-01-01T00:00:00.000000000+0000 "file1.copy4.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2000-01-01T00:00:00.000000000+0000 "file1.copy5.txt"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file1.txt"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "subdir/file20.txt"
@@ -1,9 +1,9 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file1.copy1.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file1.copy2.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file1.copy3.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file1.copy4.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file1.copy5.txt"
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file1.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "subdir/file20.txt"
- 109 md5:294d25b294ff26a5243dba914ac3fbf7 - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2000-01-01T00:00:00.000000000+0000 "file1.copy1.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2000-01-01T00:00:00.000000000+0000 "file1.copy2.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2000-01-01T00:00:00.000000000+0000 "file1.copy3.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2000-01-01T00:00:00.000000000+0000 "file1.copy4.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2000-01-01T00:00:00.000000000+0000 "file1.copy5.txt"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file1.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2000-01-01T00:00:00.000000000+0000 "subdir/file20.txt"
@@ -1,9 +0,0 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file1.copy1.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file1.copy2.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file1.copy3.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file1.copy4.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file1.copy5.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file1.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "subdir/file20.txt"
Some files were not shown because too many files have changed in this diff.