From a1cfe61ffd79330f3c659766fed415f00de0f1dd Mon Sep 17 00:00:00 2001 From: Nick Craig-Wood Date: Mon, 17 Jun 2019 17:50:58 +0100 Subject: [PATCH] googlephotos: Backend for accessing Google Photos #369 --- README.md | 1 + backend/all/all.go | 1 + backend/googlephotos/albums.go | 148 +++ backend/googlephotos/albums_test.go | 311 ++++++ backend/googlephotos/api/types.go | 190 ++++ backend/googlephotos/googlephotos.go | 962 ++++++++++++++++++ backend/googlephotos/googlephotos_test.go | 306 ++++++ backend/googlephotos/pattern.go | 335 ++++++ backend/googlephotos/pattern_test.go | 495 +++++++++ .../testfiles/rclone-test-image1.jpg | Bin 0 -> 16552 bytes .../testfiles/rclone-test-image2.jpg | Bin 0 -> 16765 bytes bin/make_manual.py | 1 + docs/content/about.md | 1 + docs/content/docs.md | 1 + docs/content/googlephotos.md | 361 +++++++ docs/content/overview.md | 2 + docs/layouts/chrome/navbar.html | 1 + fstest/test_all/config.yaml | 4 + 18 files changed, 3120 insertions(+) create mode 100644 backend/googlephotos/albums.go create mode 100644 backend/googlephotos/albums_test.go create mode 100644 backend/googlephotos/api/types.go create mode 100644 backend/googlephotos/googlephotos.go create mode 100644 backend/googlephotos/googlephotos_test.go create mode 100644 backend/googlephotos/pattern.go create mode 100644 backend/googlephotos/pattern_test.go create mode 100644 backend/googlephotos/testfiles/rclone-test-image1.jpg create mode 100644 backend/googlephotos/testfiles/rclone-test-image2.jpg create mode 100644 docs/content/googlephotos.md diff --git a/README.md b/README.md index b3338acfd..f39d84720 100644 --- a/README.md +++ b/README.md @@ -33,6 +33,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and * FTP [:page_facing_up:](https://rclone.org/ftp/) * Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/) * Google Drive [:page_facing_up:](https://rclone.org/drive/) + * Google Photos 
[:page_facing_up:](https://rclone.org/googlephotos/) * HTTP [:page_facing_up:](https://rclone.org/http/) * Hubic [:page_facing_up:](https://rclone.org/hubic/) * Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/) diff --git a/backend/all/all.go b/backend/all/all.go index 5f4a0f086..a8386f4f0 100644 --- a/backend/all/all.go +++ b/backend/all/all.go @@ -14,6 +14,7 @@ import ( _ "github.com/ncw/rclone/backend/fichier" _ "github.com/ncw/rclone/backend/ftp" _ "github.com/ncw/rclone/backend/googlecloudstorage" + _ "github.com/ncw/rclone/backend/googlephotos" _ "github.com/ncw/rclone/backend/http" _ "github.com/ncw/rclone/backend/hubic" _ "github.com/ncw/rclone/backend/jottacloud" diff --git a/backend/googlephotos/albums.go b/backend/googlephotos/albums.go new file mode 100644 index 000000000..79ea76728 --- /dev/null +++ b/backend/googlephotos/albums.go @@ -0,0 +1,148 @@ +// This file contains the albums abstraction + +package googlephotos + +import ( + "path" + "strings" + "sync" + + "github.com/ncw/rclone/backend/googlephotos/api" +) + +// All the albums +type albums struct { + mu sync.Mutex + dupes map[string][]*api.Album // duplicated names + byID map[string]*api.Album //..indexed by ID + byTitle map[string]*api.Album //..indexed by Title + path map[string][]string // partial album names to directory +} + +// Create a new album +func newAlbums() *albums { + return &albums{ + dupes: map[string][]*api.Album{}, + byID: map[string]*api.Album{}, + byTitle: map[string]*api.Album{}, + path: map[string][]string{}, + } +} + +// add an album +func (as *albums) add(album *api.Album) { + // Munge the name of the album into a sensible path name + album.Title = path.Clean(album.Title) + if album.Title == "." 
|| album.Title == "/" { + album.Title = addID("", album.ID) + } + + as.mu.Lock() + as._add(album) + as.mu.Unlock() +} + +// _add an album - call with lock held +func (as *albums) _add(album *api.Album) { + // update dupes by title + dupes := as.dupes[album.Title] + dupes = append(dupes, album) + as.dupes[album.Title] = dupes + + // Dedupe the album name if necessary + if len(dupes) >= 2 { + // If this is the first dupe, then need to adjust the first one + if len(dupes) == 2 { + firstAlbum := dupes[0] + as._del(firstAlbum) + as._add(firstAlbum) + // undo add of firstAlbum to dupes + as.dupes[album.Title] = dupes + } + album.Title = addID(album.Title, album.ID) + } + + // Store the new album + as.byID[album.ID] = album + as.byTitle[album.Title] = album + + // Store the partial paths + dir, leaf := album.Title, "" + for dir != "" { + i := strings.LastIndex(dir, "/") + if i >= 0 { + dir, leaf = dir[:i], dir[i+1:] + } else { + dir, leaf = "", dir + } + dirs := as.path[dir] + found := false + for _, dir := range dirs { + if dir == leaf { + found = true + } + } + if !found { + as.path[dir] = append(as.path[dir], leaf) + } + } +} + +// del an album +func (as *albums) del(album *api.Album) { + as.mu.Lock() + as._del(album) + as.mu.Unlock() +} + +// _del an album - call with lock held +func (as *albums) _del(album *api.Album) { + // We leave in dupes so it doesn't cause albums to get renamed + + // Remove from byID and byTitle + delete(as.byID, album.ID) + delete(as.byTitle, album.Title) + + // Remove from paths + dir, leaf := album.Title, "" + for dir != "" { + // Can't delete if this dir exists anywhere in the path structure + if _, found := as.path[dir]; found { + break + } + i := strings.LastIndex(dir, "/") + if i >= 0 { + dir, leaf = dir[:i], dir[i+1:] + } else { + dir, leaf = "", dir + } + dirs := as.path[dir] + for i, dir := range dirs { + if dir == leaf { + dirs = append(dirs[:i], dirs[i+1:]...) 
+ break + } + } + if len(dirs) == 0 { + delete(as.path, dir) + } else { + as.path[dir] = dirs + } + } +} + +// get an album by title +func (as *albums) get(title string) (album *api.Album, ok bool) { + as.mu.Lock() + defer as.mu.Unlock() + album, ok = as.byTitle[title] + return album, ok +} + +// getDirs gets directories below an album path +func (as *albums) getDirs(albumPath string) (dirs []string, ok bool) { + as.mu.Lock() + defer as.mu.Unlock() + dirs, ok = as.path[albumPath] + return dirs, ok +} diff --git a/backend/googlephotos/albums_test.go b/backend/googlephotos/albums_test.go new file mode 100644 index 000000000..2ffb4e8af --- /dev/null +++ b/backend/googlephotos/albums_test.go @@ -0,0 +1,311 @@ +package googlephotos + +import ( + "testing" + + "github.com/ncw/rclone/backend/googlephotos/api" + "github.com/stretchr/testify/assert" +) + +func TestNewAlbums(t *testing.T) { + albums := newAlbums() + assert.NotNil(t, albums.dupes) + assert.NotNil(t, albums.byID) + assert.NotNil(t, albums.byTitle) + assert.NotNil(t, albums.path) +} + +func TestAlbumsAdd(t *testing.T) { + albums := newAlbums() + + assert.Equal(t, map[string][]*api.Album{}, albums.dupes) + assert.Equal(t, map[string]*api.Album{}, albums.byID) + assert.Equal(t, map[string]*api.Album{}, albums.byTitle) + assert.Equal(t, map[string][]string{}, albums.path) + + a1 := &api.Album{ + Title: "one", + ID: "1", + } + albums.add(a1) + + assert.Equal(t, map[string][]*api.Album{ + "one": []*api.Album{a1}, + }, albums.dupes) + assert.Equal(t, map[string]*api.Album{ + "1": a1, + }, albums.byID) + assert.Equal(t, map[string]*api.Album{ + "one": a1, + }, albums.byTitle) + assert.Equal(t, map[string][]string{ + "": []string{"one"}, + }, albums.path) + + a2 := &api.Album{ + Title: "two", + ID: "2", + } + albums.add(a2) + + assert.Equal(t, map[string][]*api.Album{ + "one": []*api.Album{a1}, + "two": []*api.Album{a2}, + }, albums.dupes) + assert.Equal(t, map[string]*api.Album{ + "1": a1, + "2": a2, + }, albums.byID) 
+ assert.Equal(t, map[string]*api.Album{ + "one": a1, + "two": a2, + }, albums.byTitle) + assert.Equal(t, map[string][]string{ + "": []string{"one", "two"}, + }, albums.path) + + // Add a duplicate + a2a := &api.Album{ + Title: "two", + ID: "2a", + } + albums.add(a2a) + + assert.Equal(t, map[string][]*api.Album{ + "one": []*api.Album{a1}, + "two": []*api.Album{a2, a2a}, + }, albums.dupes) + assert.Equal(t, map[string]*api.Album{ + "1": a1, + "2": a2, + "2a": a2a, + }, albums.byID) + assert.Equal(t, map[string]*api.Album{ + "one": a1, + "two {2}": a2, + "two {2a}": a2a, + }, albums.byTitle) + assert.Equal(t, map[string][]string{ + "": []string{"one", "two {2}", "two {2a}"}, + }, albums.path) + + // Add a sub directory + a1sub := &api.Album{ + Title: "one/sub", + ID: "1sub", + } + albums.add(a1sub) + + assert.Equal(t, map[string][]*api.Album{ + "one": []*api.Album{a1}, + "two": []*api.Album{a2, a2a}, + "one/sub": []*api.Album{a1sub}, + }, albums.dupes) + assert.Equal(t, map[string]*api.Album{ + "1": a1, + "2": a2, + "2a": a2a, + "1sub": a1sub, + }, albums.byID) + assert.Equal(t, map[string]*api.Album{ + "one": a1, + "one/sub": a1sub, + "two {2}": a2, + "two {2a}": a2a, + }, albums.byTitle) + assert.Equal(t, map[string][]string{ + "": []string{"one", "two {2}", "two {2a}"}, + "one": []string{"sub"}, + }, albums.path) + + // Add a weird path + a0 := &api.Album{ + Title: "/../././..////.", + ID: "0", + } + albums.add(a0) + + assert.Equal(t, map[string][]*api.Album{ + "{0}": []*api.Album{a0}, + "one": []*api.Album{a1}, + "two": []*api.Album{a2, a2a}, + "one/sub": []*api.Album{a1sub}, + }, albums.dupes) + assert.Equal(t, map[string]*api.Album{ + "0": a0, + "1": a1, + "2": a2, + "2a": a2a, + "1sub": a1sub, + }, albums.byID) + assert.Equal(t, map[string]*api.Album{ + "{0}": a0, + "one": a1, + "one/sub": a1sub, + "two {2}": a2, + "two {2a}": a2a, + }, albums.byTitle) + assert.Equal(t, map[string][]string{ + "": []string{"one", "two {2}", "two {2a}", "{0}"}, + "one": 
[]string{"sub"}, + }, albums.path) +} + +func TestAlbumsDel(t *testing.T) { + albums := newAlbums() + + a1 := &api.Album{ + Title: "one", + ID: "1", + } + albums.add(a1) + + a2 := &api.Album{ + Title: "two", + ID: "2", + } + albums.add(a2) + + // Add a duplicate + a2a := &api.Album{ + Title: "two", + ID: "2a", + } + albums.add(a2a) + + // Add a sub directory + a1sub := &api.Album{ + Title: "one/sub", + ID: "1sub", + } + albums.add(a1sub) + + assert.Equal(t, map[string][]*api.Album{ + "one": []*api.Album{a1}, + "two": []*api.Album{a2, a2a}, + "one/sub": []*api.Album{a1sub}, + }, albums.dupes) + assert.Equal(t, map[string]*api.Album{ + "1": a1, + "2": a2, + "2a": a2a, + "1sub": a1sub, + }, albums.byID) + assert.Equal(t, map[string]*api.Album{ + "one": a1, + "one/sub": a1sub, + "two {2}": a2, + "two {2a}": a2a, + }, albums.byTitle) + assert.Equal(t, map[string][]string{ + "": []string{"one", "two {2}", "two {2a}"}, + "one": []string{"sub"}, + }, albums.path) + + albums.del(a1) + + assert.Equal(t, map[string][]*api.Album{ + "one": []*api.Album{a1}, + "two": []*api.Album{a2, a2a}, + "one/sub": []*api.Album{a1sub}, + }, albums.dupes) + assert.Equal(t, map[string]*api.Album{ + "2": a2, + "2a": a2a, + "1sub": a1sub, + }, albums.byID) + assert.Equal(t, map[string]*api.Album{ + "one/sub": a1sub, + "two {2}": a2, + "two {2a}": a2a, + }, albums.byTitle) + assert.Equal(t, map[string][]string{ + "": []string{"one", "two {2}", "two {2a}"}, + "one": []string{"sub"}, + }, albums.path) + + albums.del(a2) + + assert.Equal(t, map[string][]*api.Album{ + "one": []*api.Album{a1}, + "two": []*api.Album{a2, a2a}, + "one/sub": []*api.Album{a1sub}, + }, albums.dupes) + assert.Equal(t, map[string]*api.Album{ + "2a": a2a, + "1sub": a1sub, + }, albums.byID) + assert.Equal(t, map[string]*api.Album{ + "one/sub": a1sub, + "two {2a}": a2a, + }, albums.byTitle) + assert.Equal(t, map[string][]string{ + "": []string{"one", "two {2a}"}, + "one": []string{"sub"}, + }, albums.path) + + albums.del(a2a) + 
+ assert.Equal(t, map[string][]*api.Album{ + "one": []*api.Album{a1}, + "two": []*api.Album{a2, a2a}, + "one/sub": []*api.Album{a1sub}, + }, albums.dupes) + assert.Equal(t, map[string]*api.Album{ + "1sub": a1sub, + }, albums.byID) + assert.Equal(t, map[string]*api.Album{ + "one/sub": a1sub, + }, albums.byTitle) + assert.Equal(t, map[string][]string{ + "": []string{"one"}, + "one": []string{"sub"}, + }, albums.path) + + albums.del(a1sub) + + assert.Equal(t, map[string][]*api.Album{ + "one": []*api.Album{a1}, + "two": []*api.Album{a2, a2a}, + "one/sub": []*api.Album{a1sub}, + }, albums.dupes) + assert.Equal(t, map[string]*api.Album{}, albums.byID) + assert.Equal(t, map[string]*api.Album{}, albums.byTitle) + assert.Equal(t, map[string][]string{}, albums.path) +} + +func TestAlbumsGet(t *testing.T) { + albums := newAlbums() + + a1 := &api.Album{ + Title: "one", + ID: "1", + } + albums.add(a1) + + album, ok := albums.get("one") + assert.Equal(t, true, ok) + assert.Equal(t, a1, album) + + album, ok = albums.get("notfound") + assert.Equal(t, false, ok) + assert.Nil(t, album) +} + +func TestAlbumsGetDirs(t *testing.T) { + albums := newAlbums() + + a1 := &api.Album{ + Title: "one", + ID: "1", + } + albums.add(a1) + + dirs, ok := albums.getDirs("") + assert.Equal(t, true, ok) + assert.Equal(t, []string{"one"}, dirs) + + dirs, ok = albums.getDirs("notfound") + assert.Equal(t, false, ok) + assert.Nil(t, dirs) +} diff --git a/backend/googlephotos/api/types.go b/backend/googlephotos/api/types.go new file mode 100644 index 000000000..46b338448 --- /dev/null +++ b/backend/googlephotos/api/types.go @@ -0,0 +1,190 @@ +package api + +import ( + "fmt" + "time" +) + +// ErrorDetails in the internals of the Error type +type ErrorDetails struct { + Code int `json:"code"` + Message string `json:"message"` + Status string `json:"status"` +} + +// Error is returned on errors +type Error struct { + Details ErrorDetails `json:"error"` +} + +// Error statisfies error interface +func (e *Error) 
Error() string { + return fmt.Sprintf("%s (%d %s)", e.Details.Message, e.Details.Code, e.Details.Status) +} + +// Album of photos +type Album struct { + ID string `json:"id,omitempty"` + Title string `json:"title"` + ProductURL string `json:"productUrl,omitempty"` + MediaItemsCount string `json:"mediaItemsCount,omitempty"` + CoverPhotoBaseURL string `json:"coverPhotoBaseUrl,omitempty"` + CoverPhotoMediaItemID string `json:"coverPhotoMediaItemId,omitempty"` + IsWriteable bool `json:"isWriteable,omitempty"` +} + +// ListAlbums is returned from albums.list and sharedAlbums.list +type ListAlbums struct { + Albums []Album `json:"albums"` + SharedAlbums []Album `json:"sharedAlbums"` + NextPageToken string `json:"nextPageToken"` +} + +// CreateAlbum creates an Album +type CreateAlbum struct { + Album *Album `json:"album"` +} + +// MediaItem is a photo or video +type MediaItem struct { + ID string `json:"id"` + ProductURL string `json:"productUrl"` + BaseURL string `json:"baseUrl"` + MimeType string `json:"mimeType"` + MediaMetadata struct { + CreationTime time.Time `json:"creationTime"` + Width string `json:"width"` + Height string `json:"height"` + Photo struct { + } `json:"photo"` + } `json:"mediaMetadata"` + Filename string `json:"filename"` +} + +// MediaItems is returned from mediaitems.list, mediaitems.search +type MediaItems struct { + MediaItems []MediaItem `json:"mediaItems"` + NextPageToken string `json:"nextPageToken"` +} + +//Content categories +// NONE Default content category. This category is ignored when any other category is used in the filter. +// LANDSCAPES Media items containing landscapes. +// RECEIPTS Media items containing receipts. +// CITYSCAPES Media items containing cityscapes. +// LANDMARKS Media items containing landmarks. +// SELFIES Media items that are selfies. +// PEOPLE Media items containing people. +// PETS Media items containing pets. +// WEDDINGS Media items from weddings. +// BIRTHDAYS Media items from birthdays. 
+// DOCUMENTS Media items containing documents. +// TRAVEL Media items taken during travel. +// ANIMALS Media items containing animals. +// FOOD Media items containing food. +// SPORT Media items from sporting events. +// NIGHT Media items taken at night. +// PERFORMANCES Media items from performances. +// WHITEBOARDS Media items containing whiteboards. +// SCREENSHOTS Media items that are screenshots. +// UTILITY Media items that are considered to be utility. These include, but aren't limited to documents, screenshots, whiteboards etc. +// ARTS Media items containing art. +// CRAFTS Media items containing crafts. +// FASHION Media items related to fashion. +// HOUSES Media items containing houses. +// GARDENS Media items containing gardens. +// FLOWERS Media items containing flowers. +// HOLIDAYS Media items taken of holidays. + +// MediaTypes +// ALL_MEDIA Treated as if no filters are applied. All media types are included. +// VIDEO All media items that are considered videos. This also includes movies the user has created using the Google Photos app. +// PHOTO All media items that are considered photos. This includes .bmp, .gif, .ico, .jpg (and other spellings), .tiff, .webp and special photo types such as iOS live photos, Android motion photos, panoramas, photospheres. + +// Features +// NONE Treated as if no filters are applied. All features are included. +// FAVORITES Media items that the user has marked as favorites in the Google Photos app. 
+ +// Date is used as part of SearchFilter +type Date struct { + Year int `json:"year,omitempty"` + Month int `json:"month,omitempty"` + Day int `json:"day,omitempty"` +} + +// DateFilter is uses to add date ranges to media item queries +type DateFilter struct { + Dates []Date `json:"dates,omitempty"` + Ranges []struct { + StartDate Date `json:"startDate,omitempty"` + EndDate Date `json:"endDate,omitempty"` + } `json:"ranges,omitempty"` +} + +// ContentFilter is uses to add content categories to media item queries +type ContentFilter struct { + IncludedContentCategories []string `json:"includedContentCategories,omitempty"` + ExcludedContentCategories []string `json:"excludedContentCategories,omitempty"` +} + +// MediaTypeFilter is uses to add media types to media item queries +type MediaTypeFilter struct { + MediaTypes []string `json:"mediaTypes,omitempty"` +} + +// FeatureFilter is uses to add features to media item queries +type FeatureFilter struct { + IncludedFeatures []string `json:"includedFeatures,omitempty"` +} + +// Filters combines all the filter types for media item queries +type Filters struct { + DateFilter *DateFilter `json:"dateFilter,omitempty"` + ContentFilter *ContentFilter `json:"contentFilter,omitempty"` + MediaTypeFilter *MediaTypeFilter `json:"mediaTypeFilter,omitempty"` + FeatureFilter *FeatureFilter `json:"featureFilter,omitempty"` + IncludeArchivedMedia *bool `json:"includeArchivedMedia,omitempty"` + ExcludeNonAppCreatedData *bool `json:"excludeNonAppCreatedData,omitempty"` +} + +// SearchFilter is uses with mediaItems.search +type SearchFilter struct { + AlbumID string `json:"albumId,omitempty"` + PageSize int `json:"pageSize"` + PageToken string `json:"pageToken,omitempty"` + Filters *Filters `json:"filters,omitempty"` +} + +// SimpleMediaItem is part of NewMediaItem +type SimpleMediaItem struct { + UploadToken string `json:"uploadToken"` +} + +// NewMediaItem is a single media item for upload +type NewMediaItem struct { + Description 
string `json:"description"` + SimpleMediaItem SimpleMediaItem `json:"simpleMediaItem"` +} + +// BatchCreateRequest creates media items from upload tokens +type BatchCreateRequest struct { + AlbumID string `json:"albumId,omitempty"` + NewMediaItems []NewMediaItem `json:"newMediaItems"` +} + +// BatchCreateResponse is returned from BatchCreateRequest +type BatchCreateResponse struct { + NewMediaItemResults []struct { + UploadToken string `json:"uploadToken"` + Status struct { + Message string `json:"message"` + Code int `json:"code"` + } `json:"status"` + MediaItem MediaItem `json:"mediaItem"` + } `json:"newMediaItemResults"` +} + +// BatchRemoveItems is for removing items from an album +type BatchRemoveItems struct { + MediaItemIds []string `json:"mediaItemIds"` +} diff --git a/backend/googlephotos/googlephotos.go b/backend/googlephotos/googlephotos.go new file mode 100644 index 000000000..49d9fac66 --- /dev/null +++ b/backend/googlephotos/googlephotos.go @@ -0,0 +1,962 @@ +// Package googlephotos provides an interface to Google Photos +package googlephotos + +// FIXME Resumable uploads not implemented - rclone can't resume uploads in general + +import ( + "context" + "encoding/json" + "fmt" + "io" + golog "log" + "net/http" + "net/url" + "path" + "regexp" + "strconv" + "strings" + "sync" + "time" + + "github.com/ncw/rclone/backend/googlephotos/api" + "github.com/ncw/rclone/fs" + "github.com/ncw/rclone/fs/config" + "github.com/ncw/rclone/fs/config/configmap" + "github.com/ncw/rclone/fs/config/configstruct" + "github.com/ncw/rclone/fs/config/obscure" + "github.com/ncw/rclone/fs/dirtree" + "github.com/ncw/rclone/fs/fserrors" + "github.com/ncw/rclone/fs/hash" + "github.com/ncw/rclone/fs/log" + "github.com/ncw/rclone/lib/oauthutil" + "github.com/ncw/rclone/lib/pacer" + "github.com/ncw/rclone/lib/rest" + "github.com/pkg/errors" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" +) + +var ( + errCantUpload = errors.New("can't upload files here") + errCantMkdir = 
errors.New("can't make directories here") + errCantRmdir = errors.New("can't remove this directory") + errAlbumDelete = errors.New("google photos API does not implement deleting albums") + errRemove = errors.New("google photos API only implements removing files from albums") + errOwnAlbums = errors.New("google photos API only allows uploading to albums rclone created") +) + +const ( + rcloneClientID = "202264815644-rt1o1c9evjaotbpbab10m83i8cnjk077.apps.googleusercontent.com" + rcloneEncryptedClientSecret = "kLJLretPefBgrDHosdml_nlF64HZ9mUcO85X5rdjYBPP8ChA-jr3Ow" + rootURL = "https://photoslibrary.googleapis.com/v1" + listChunks = 100 // chunk size to read directory listings + albumChunks = 50 // chunk size to read album listings + minSleep = 10 * time.Millisecond + scopeReadOnly = "https://www.googleapis.com/auth/photoslibrary.readonly" + scopeReadWrite = "https://www.googleapis.com/auth/photoslibrary" +) + +var ( + // Description of how to auth for this app + oauthConfig = &oauth2.Config{ + Scopes: []string{ + scopeReadWrite, + }, + Endpoint: google.Endpoint, + ClientID: rcloneClientID, + ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret), + RedirectURL: oauthutil.TitleBarRedirectURL, + } +) + +// Register with Fs +func init() { + fs.Register(&fs.RegInfo{ + Name: "google photos", + Prefix: "gphotos", + Description: "Google Photos", + NewFs: NewFs, + Config: func(name string, m configmap.Mapper) { + // Parse config into Options struct + opt := new(Options) + err := configstruct.Set(m, opt) + if err != nil { + fs.Errorf(nil, "Couldn't parse config into struct: %v", err) + return + } + + // Fill in the scopes + if opt.ReadOnly { + oauthConfig.Scopes[0] = scopeReadOnly + } else { + oauthConfig.Scopes[0] = scopeReadWrite + } + + // Do the oauth + err = oauthutil.Config("google photos", name, m, oauthConfig) + if err != nil { + golog.Fatalf("Failed to configure token: %v", err) + } + + // Warn the user + fmt.Print(` +*** IMPORTANT: All media items uploaded to 
Google Photos with rclone +*** are stored in full resolution at original quality. These uploads +*** will count towards storage in your Google Account. + +`) + + }, + Options: []fs.Option{{ + Name: config.ConfigClientID, + Help: "Google Application Client Id\nLeave blank normally.", + }, { + Name: config.ConfigClientSecret, + Help: "Google Application Client Secret\nLeave blank normally.", + }, { + Name: "read_only", + Default: false, + Help: `Set to make the Google Photos backend read only. + +If you choose read only then rclone will only request read only access +to your photos, otherwise rclone will request full access.`, + }, { + Name: "read_size", + Default: false, + Help: `Set to read the size of media items. + +Normally rclone does not read the size of media items since this takes +another transaction. This isn't necessary for syncing. However +rclone mount needs to know the size of files in advance of reading +them, so setting this flag when using rclone mount is recommended if +you want to read the media.`, + Advanced: true, + }}, + }) +} + +// Options defines the configuration for this backend +type Options struct { + ReadOnly bool `config:"read_only"` + ReadSize bool `config:"read_size"` +} + +// Fs represents a remote storage server +type Fs struct { + name string // name of this remote + root string // the path we are working on if any + opt Options // parsed options + features *fs.Features // optional features + srv *rest.Client // the connection to the one drive server + pacer *fs.Pacer // To pace the API calls + startTime time.Time // time Fs was started - used for datestamps + albums map[bool]*albums // albums, shared or not + uploadedMu sync.Mutex // to protect the below + uploaded dirtree.DirTree // record of uploaded items +} + +// Object describes a storage object +// +// Will definitely have info but maybe not meta +type Object struct { + fs *Fs // what this object is part of + remote string // The remote path + url string // download path + 
id string // ID of this object + bytes int64 // Bytes in the object + modTime time.Time // Modified time of the object + mimeType string +} + +// ------------------------------------------------------------ + +// Name of the remote (as passed into NewFs) +func (f *Fs) Name() string { + return f.name +} + +// Root of the remote (as passed into NewFs) +func (f *Fs) Root() string { + return f.root +} + +// String converts this Fs to a string +func (f *Fs) String() string { + return fmt.Sprintf("Google Photos path %q", f.root) +} + +// Features returns the optional features of this Fs +func (f *Fs) Features() *fs.Features { + return f.features +} + +// dirTime returns the time to set a directory to +func (f *Fs) dirTime() time.Time { + return f.startTime +} + +// retryErrorCodes is a slice of error codes that we will retry +var retryErrorCodes = []int{ + 429, // Too Many Requests. + 500, // Internal Server Error + 502, // Bad Gateway + 503, // Service Unavailable + 504, // Gateway Timeout + 509, // Bandwidth Limit Exceeded +} + +// shouldRetry returns a boolean as to whether this resp and err +// deserve to be retried. 
It returns the err as a convenience +func shouldRetry(resp *http.Response, err error) (bool, error) { + return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err +} + +// errorHandler parses a non 2xx error response into an error +func errorHandler(resp *http.Response) error { + body, err := rest.ReadBody(resp) + if err != nil { + body = nil + } + var e = api.Error{ + Details: api.ErrorDetails{ + Code: resp.StatusCode, + Message: string(body), + Status: resp.Status, + }, + } + if body != nil { + _ = json.Unmarshal(body, &e) + } + return &e +} + +// NewFs constructs an Fs from the path, bucket:path +func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { + // Parse config into Options struct + opt := new(Options) + err := configstruct.Set(m, opt) + if err != nil { + return nil, err + } + + oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig) + if err != nil { + return nil, errors.Wrap(err, "failed to configure Box") + } + + root = strings.Trim(path.Clean(root), "/") + if root == "." || root == "/" { + root = "" + } + f := &Fs{ + name: name, + root: root, + opt: *opt, + srv: rest.NewClient(oAuthClient).SetRoot(rootURL), + pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))), + startTime: time.Now(), + albums: map[bool]*albums{}, + uploaded: dirtree.New(), + } + f.features = (&fs.Features{ + ReadMimeType: true, + }).Fill(f) + f.srv.SetErrorHandler(errorHandler) + + _, _, pattern := patterns.match(f.root, "", true) + if pattern != nil && pattern.isFile { + oldRoot := f.root + var leaf string + f.root, leaf = path.Split(f.root) + f.root = strings.TrimRight(f.root, "/") + _, err := f.NewObject(context.TODO(), leaf) + if err == nil { + return f, fs.ErrorIsFile + } + f.root = oldRoot + } + return f, nil +} + +// Return an Object from a path +// +// If it can't be found it returns the error fs.ErrorObjectNotFound. 
+func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.MediaItem) (fs.Object, error) { + o := &Object{ + fs: f, + remote: remote, + } + if info != nil { + o.setMetaData(info) + } else { + err := o.readMetaData(ctx) // reads info and meta, returning an error + if err != nil { + return nil, err + } + } + return o, nil +} + +// NewObject finds the Object at remote. If it can't be found +// it returns the error fs.ErrorObjectNotFound. +func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { + defer log.Trace(f, "remote=%q", remote)("") + return f.newObjectWithInfo(ctx, remote, nil) +} + +// addID adds the ID to name +func addID(name string, ID string) string { + idStr := "{" + ID + "}" + if name == "" { + return idStr + } + return name + " " + idStr +} + +// addFileID adds the ID to the fileName passed in +func addFileID(fileName string, ID string) string { + ext := path.Ext(fileName) + base := fileName[:len(fileName)-len(ext)] + return addID(base, ID) + ext +} + +var idRe = regexp.MustCompile(`\{([A-Za-z0-9_-]{55,})\}`) + +// findID finds an ID in string if one is there or "" +func findID(name string) string { + match := idRe.FindStringSubmatch(name) + if match == nil { + return "" + } + return match[1] +} + +// list the albums into an internal cache +// FIXME cache invalidation +func (f *Fs) listAlbums(shared bool) (all *albums, err error) { + all, ok := f.albums[shared] + if ok && all != nil { + return all, nil + } + opts := rest.Opts{ + Method: "GET", + Path: "/albums", + Parameters: url.Values{}, + } + if shared { + opts.Path = "/sharedAlbums" + } + all = newAlbums() + opts.Parameters.Set("pageSize", strconv.Itoa(albumChunks)) + lastID := "" + for { + var result api.ListAlbums + var resp *http.Response + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(&opts, nil, &result) + return shouldRetry(resp, err) + }) + if err != nil { + return nil, errors.Wrap(err, "couldn't list albums") + } + newAlbums 
:= result.Albums + if shared { + newAlbums = result.SharedAlbums + } + if len(newAlbums) > 0 && newAlbums[0].ID == lastID { + // skip first if ID duplicated from last page + newAlbums = newAlbums[1:] + } + if len(newAlbums) > 0 { + lastID = newAlbums[len(newAlbums)-1].ID + } + for i := range newAlbums { + all.add(&newAlbums[i]) + } + if result.NextPageToken == "" { + break + } + opts.Parameters.Set("pageToken", result.NextPageToken) + } + f.albums[shared] = all + return all, nil +} + +// listFn is called from list to handle an object. +type listFn func(remote string, object *api.MediaItem, isDirectory bool) error + +// list the objects into the function supplied +// +// dir is the starting directory, "" for root +// +// Set recurse to read sub directories +func (f *Fs) list(filter api.SearchFilter, fn listFn) (err error) { + opts := rest.Opts{ + Method: "POST", + Path: "/mediaItems:search", + } + filter.PageSize = listChunks + filter.PageToken = "" + lastID := "" + for { + var result api.MediaItems + var resp *http.Response + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(&opts, &filter, &result) + return shouldRetry(resp, err) + }) + if err != nil { + return errors.Wrap(err, "couldn't list files") + } + items := result.MediaItems + if len(items) > 0 && items[0].ID == lastID { + // skip first if ID duplicated from last page + items = items[1:] + } + if len(items) > 0 { + lastID = items[len(items)-1].ID + } + for i := range items { + item := &result.MediaItems[i] + remote := item.Filename + remote = strings.Replace(remote, "/", "/", -1) + err = fn(remote, item, false) + if err != nil { + return err + } + } + if result.NextPageToken == "" { + break + } + filter.PageToken = result.NextPageToken + } + + return nil +} + +// Convert a list item into a DirEntry +func (f *Fs) itemToDirEntry(ctx context.Context, remote string, item *api.MediaItem, isDirectory bool) (fs.DirEntry, error) { + if isDirectory { + d := fs.NewDir(remote, f.dirTime()) + 
return d, nil + } + o := &Object{ + fs: f, + remote: remote, + } + o.setMetaData(item) + return o, nil +} + +// listDir lists a single directory +func (f *Fs) listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error) { + // List the objects + err = f.list(filter, func(remote string, item *api.MediaItem, isDirectory bool) error { + entry, err := f.itemToDirEntry(ctx, prefix+remote, item, isDirectory) + if err != nil { + return err + } + if entry != nil { + entries = append(entries, entry) + } + return nil + }) + if err != nil { + return nil, err + } + // Dedupe the file names + dupes := map[string]int{} + for _, entry := range entries { + o, ok := entry.(*Object) + if ok { + dupes[o.remote]++ + } + } + for _, entry := range entries { + o, ok := entry.(*Object) + if ok { + duplicated := dupes[o.remote] > 1 + if duplicated || o.remote == "" { + o.remote = addFileID(o.remote, o.id) + } + } + } + return entries, err +} + +// listUploads lists a single directory from the uploads +func (f *Fs) listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error) { + f.uploadedMu.Lock() + entries, ok := f.uploaded[dir] + f.uploadedMu.Unlock() + if !ok && dir != "" { + return nil, fs.ErrorDirNotFound + } + return entries, nil +} + +// List the objects and directories in dir into entries. The +// entries can be returned in any order but should be for a +// complete directory. +// +// dir should be "" to list the root, and should not have +// trailing slashes. +// +// This should return ErrDirNotFound if the directory isn't +// found. 
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { + defer log.Trace(f, "dir=%q", dir)("err=%v", &err) + match, prefix, pattern := patterns.match(f.root, dir, false) + if pattern == nil || pattern.isFile { + return nil, fs.ErrorDirNotFound + } + if pattern.toEntries != nil { + return pattern.toEntries(ctx, f, prefix, match) + } + return nil, fs.ErrorDirNotFound +} + +// Put the object into the bucket +// +// Copy the reader in to the new object which is returned +// +// The new object may have been created if an error is returned +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + defer log.Trace(f, "src=%+v", src)("") + // Temporary Object under construction + o := &Object{ + fs: f, + remote: src.Remote(), + } + return o, o.Update(ctx, in, src, options...) +} + +// createAlbum creates the album +func (f *Fs) createAlbum(ctx context.Context, albumName string) (album *api.Album, err error) { + opts := rest.Opts{ + Method: "POST", + Path: "/albums", + Parameters: url.Values{}, + } + var request = api.CreateAlbum{ + Album: &api.Album{ + Title: albumName, + }, + } + var result api.Album + var resp *http.Response + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(&opts, request, &result) + return shouldRetry(resp, err) + }) + if err != nil { + return nil, errors.Wrap(err, "couldn't create album") + } + f.albums[false].add(&result) + return &result, nil +} + +// Mkdir creates the album if it doesn't exist +func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) { + defer log.Trace(f, "dir=%q", dir)("err=%v", &err) + match, prefix, pattern := patterns.match(f.root, dir, false) + if pattern == nil { + return fs.ErrorDirNotFound + } + if !pattern.canMkdir { + return errCantMkdir + } + if pattern.isUpload { + f.uploadedMu.Lock() + d := fs.NewDir(strings.Trim(prefix, "/"), f.dirTime()) + f.uploaded.AddEntry(d) + f.uploadedMu.Unlock() + 
return nil
+	}
+	albumName := match[1]
+	allAlbums, err := f.listAlbums(false)
+	if err != nil {
+		return err
+	}
+	_, ok := allAlbums.get(albumName)
+	if ok {
+		return nil
+	}
+	_, err = f.createAlbum(ctx, albumName)
+	return err
+}
+
+// Rmdir deletes the bucket if the fs is at the root
+//
+// Returns an error if it isn't empty
+func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
+	defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
+	match, _, pattern := patterns.match(f.root, dir, false)
+	if pattern == nil {
+		return fs.ErrorDirNotFound
+	}
+	if !pattern.canMkdir {
+		return errCantRmdir
+	}
+	if pattern.isUpload {
+		f.uploadedMu.Lock()
+		err = f.uploaded.Prune(map[string]bool{
+			dir: true,
+		})
+		f.uploadedMu.Unlock()
+		return err
+	}
+	albumName := match[1]
+	allAlbums, err := f.listAlbums(false)
+	if err != nil {
+		return err
+	}
+	album, ok := allAlbums.get(albumName)
+	if !ok {
+		return fs.ErrorDirNotFound
+	}
+	_ = album
+	return errAlbumDelete
+}
+
+// Precision returns the precision
+func (f *Fs) Precision() time.Duration {
+	return fs.ModTimeNotSupported
+}
+
+// Hashes returns the supported hash sets.
+func (f *Fs) Hashes() hash.Set { + return hash.Set(hash.None) +} + +// ------------------------------------------------------------ + +// Fs returns the parent Fs +func (o *Object) Fs() fs.Info { + return o.fs +} + +// Return a string version +func (o *Object) String() string { + if o == nil { + return "" + } + return o.remote +} + +// Remote returns the remote path +func (o *Object) Remote() string { + return o.remote +} + +// Hash returns the Md5sum of an object returning a lowercase hex string +func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { + return "", hash.ErrUnsupported +} + +// Size returns the size of an object in bytes +func (o *Object) Size() int64 { + defer log.Trace(o, "")("") + if !o.fs.opt.ReadSize || o.bytes >= 0 { + return o.bytes + } + ctx := context.TODO() + err := o.readMetaData(ctx) + if err != nil { + fs.Debugf(o, "Size: Failed to read metadata: %v", err) + return -1 + } + var resp *http.Response + opts := rest.Opts{ + Method: "HEAD", + RootURL: o.downloadURL(), + } + err = o.fs.pacer.Call(func() (bool, error) { + resp, err = o.fs.srv.Call(&opts) + return shouldRetry(resp, err) + }) + if err != nil { + fs.Debugf(o, "Reading size failed: %v", err) + } else { + lengthStr := resp.Header.Get("Content-Length") + length, err := strconv.ParseInt(lengthStr, 10, 64) + if err != nil { + fs.Debugf(o, "Reading size failed to parse Content_length %q: %v", lengthStr, err) + } else { + o.bytes = length + } + } + return o.bytes +} + +// setMetaData sets the fs data from a storage.Object +func (o *Object) setMetaData(info *api.MediaItem) { + o.url = info.BaseURL + o.id = info.ID + o.bytes = -1 // FIXME + o.mimeType = info.MimeType + o.modTime = info.MediaMetadata.CreationTime +} + +// readMetaData gets the metadata if it hasn't already been fetched +// +// it also sets the info +func (o *Object) readMetaData(ctx context.Context) (err error) { + if !o.modTime.IsZero() && o.url != "" { + return nil + } + dir, fileName := 
path.Split(o.remote) + dir = strings.Trim(dir, "/") + _, _, pattern := patterns.match(o.fs.root, o.remote, true) + if pattern == nil { + return fs.ErrorObjectNotFound + } + if !pattern.isFile { + return fs.ErrorNotAFile + } + // If have ID fetch it directly + if id := findID(fileName); id != "" { + opts := rest.Opts{ + Method: "GET", + Path: "/mediaItems/" + id, + } + var item api.MediaItem + var resp *http.Response + err = o.fs.pacer.Call(func() (bool, error) { + resp, err = o.fs.srv.CallJSON(&opts, nil, &item) + return shouldRetry(resp, err) + }) + if err != nil { + return errors.Wrap(err, "couldn't get media item") + } + o.setMetaData(&item) + return nil + } + // Otherwise list the directory the file is in + entries, err := o.fs.List(ctx, dir) + if err != nil { + if err == fs.ErrorDirNotFound { + return fs.ErrorObjectNotFound + } + return err + } + // and find the file in the directory + for _, entry := range entries { + if entry.Remote() == o.remote { + if newO, ok := entry.(*Object); ok { + *o = *newO + return nil + } + } + } + return fs.ErrorObjectNotFound +} + +// ModTime returns the modification time of the object +// +// It attempts to read the objects mtime and if that isn't present the +// LastModified returned in the http headers +func (o *Object) ModTime(ctx context.Context) time.Time { + defer log.Trace(o, "")("") + err := o.readMetaData(ctx) + if err != nil { + fs.Debugf(o, "ModTime: Failed to read metadata: %v", err) + return time.Now() + } + return o.modTime +} + +// SetModTime sets the modification time of the local fs object +func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) { + return fs.ErrorCantSetModTime +} + +// Storable returns a boolean as to whether this object is storable +func (o *Object) Storable() bool { + return true +} + +// downloadURL returns the URL for a full bytes download for the object +func (o *Object) downloadURL() string { + url := o.url + "=d" + if strings.HasPrefix(o.mimeType, "video/") { + 
url += "v"
+	}
+	return url
+}
+
+// Open an object for read
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
+	defer log.Trace(o, "")("")
+	err = o.readMetaData(ctx)
+	if err != nil {
+		fs.Debugf(o, "Open: Failed to read metadata: %v", err)
+		return nil, err
+	}
+	var resp *http.Response
+	opts := rest.Opts{
+		Method:  "GET",
+		RootURL: o.downloadURL(),
+		Options: options,
+	}
+	err = o.fs.pacer.Call(func() (bool, error) {
+		resp, err = o.fs.srv.Call(&opts)
+		return shouldRetry(resp, err)
+	})
+	if err != nil {
+		return nil, err
+	}
+	return resp.Body, err
+}
+
+// Update the object with the contents of the io.Reader, modTime and size
+//
+// The new object may have been created if an error is returned
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
+	defer log.Trace(o, "src=%+v", src)("err=%v", &err)
+	match, _, pattern := patterns.match(o.fs.root, o.remote, true)
+	if pattern == nil || !pattern.isFile || !pattern.canUpload {
+		return errCantUpload
+	}
+	var (
+		albumID  string
+		fileName string
+	)
+	if pattern.isUpload {
+		fileName = match[1]
+	} else {
+		var albumName string
+		albumName, fileName = match[1], match[2]
+
+		// Create album if not found
+		album, ok := o.fs.albums[false].get(albumName)
+		if !ok {
+			album, err = o.fs.createAlbum(ctx, albumName)
+			if err != nil {
+				return err
+			}
+		}
+
+		// Check we can write to this album
+		if !album.IsWriteable {
+			return errOwnAlbums
+		}
+
+		albumID = album.ID
+	}
+
+	// Upload the media item in exchange for an UploadToken
+	opts := rest.Opts{
+		Method: "POST",
+		Path:   "/uploads",
+		ExtraHeaders: map[string]string{
+			"X-Goog-Upload-File-Name": fileName,
+			"X-Goog-Upload-Protocol":  "raw",
+		},
+		Body: in,
+	}
+	var token []byte
+	var resp *http.Response
+	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
+		resp, err = o.fs.srv.Call(&opts)
+		if err != nil {
+			if resp != nil { _ = resp.Body.Close() } // resp is nil on transport errors - guard before closing
+			return 
shouldRetry(resp, err)
+		}
+		token, err = rest.ReadBody(resp)
+		return shouldRetry(resp, err)
+	})
+	if err != nil {
+		return errors.Wrap(err, "couldn't upload file")
+	}
+	uploadToken := strings.TrimSpace(string(token))
+	if uploadToken == "" {
+		return errors.New("empty upload token")
+	}
+
+	// Create the media item from an UploadToken, optionally adding to an album
+	opts = rest.Opts{
+		Method: "POST",
+		Path:   "/mediaItems:batchCreate",
+	}
+	var request = api.BatchCreateRequest{
+		AlbumID: albumID,
+		NewMediaItems: []api.NewMediaItem{
+			{
+				SimpleMediaItem: api.SimpleMediaItem{
+					UploadToken: uploadToken,
+				},
+			},
+		},
+	}
+	var result api.BatchCreateResponse
+	err = o.fs.pacer.Call(func() (bool, error) {
+		resp, err = o.fs.srv.CallJSON(&opts, request, &result)
+		return shouldRetry(resp, err)
+	})
+	if err != nil {
+		return errors.Wrap(err, "failed to create media item")
+	}
+	if len(result.NewMediaItemResults) != 1 {
+		return errors.New("bad response to BatchCreate wrong number of items")
+	}
+	mediaItemResult := result.NewMediaItemResults[0]
+	if mediaItemResult.Status.Code != 0 {
+		return errors.Errorf("upload failed: %s (%d)", mediaItemResult.Status.Message, mediaItemResult.Status.Code)
+	}
+	o.setMetaData(&mediaItemResult.MediaItem)
+
+	// Add upload to internal storage
+	if pattern.isUpload {
+		o.fs.uploaded.AddEntry(o)
+	}
+	return nil
+}
+
+// Remove an object
+func (o *Object) Remove(ctx context.Context) (err error) {
+	match, _, pattern := patterns.match(o.fs.root, o.remote, true)
+	if pattern == nil || !pattern.isFile || !pattern.canUpload || pattern.isUpload {
+		return errRemove
+	}
+	albumName, fileName := match[1], match[2]
+	album, ok := o.fs.albums[false].get(albumName)
+	if !ok {
+		return errors.Errorf("couldn't find %q in album %q for delete", fileName, albumName)
+	}
+	opts := rest.Opts{
+		Method:     "POST",
+		Path:       "/albums/" + album.ID + ":batchRemoveMediaItems",
+		NoResponse: true,
+	}
+	var request = api.BatchRemoveItems{
+		MediaItemIds: 
[]string{o.id}, + } + var resp *http.Response + err = o.fs.pacer.Call(func() (bool, error) { + resp, err = o.fs.srv.CallJSON(&opts, &request, nil) + return shouldRetry(resp, err) + }) + if err != nil { + return errors.Wrap(err, "couldn't delete item from album") + } + return nil +} + +// MimeType of an Object if known, "" otherwise +func (o *Object) MimeType(ctx context.Context) string { + return o.mimeType +} + +// ID of an Object if known, "" otherwise +func (o *Object) ID() string { + return o.id +} + +// Check the interfaces are satisfied +var ( + _ fs.Fs = &Fs{} + _ fs.Object = &Object{} + _ fs.MimeTyper = &Object{} + _ fs.IDer = &Object{} +) diff --git a/backend/googlephotos/googlephotos_test.go b/backend/googlephotos/googlephotos_test.go new file mode 100644 index 000000000..9bc591c34 --- /dev/null +++ b/backend/googlephotos/googlephotos_test.go @@ -0,0 +1,306 @@ +package googlephotos + +import ( + "context" + "fmt" + "io/ioutil" + "net/http" + "path" + "testing" + "time" + + _ "github.com/ncw/rclone/backend/local" + "github.com/ncw/rclone/fs" + "github.com/ncw/rclone/fs/hash" + "github.com/ncw/rclone/fstest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + // We have two different files here as Google Photos will uniq + // them otherwise which confuses the tests as the filename is + // unexpected. 
+ fileNameAlbum = "rclone-test-image1.jpg" + fileNameUpload = "rclone-test-image2.jpg" +) + +// Wrapper to override the remote for an object +type overrideRemoteObject struct { + fs.Object + remote string +} + +// Remote returns the overridden remote name +func (o *overrideRemoteObject) Remote() string { + return o.remote +} + +func TestIntegration(t *testing.T) { + ctx := context.Background() + fstest.Initialise() + + // Create Fs + if *fstest.RemoteName == "" { + *fstest.RemoteName = "TestGooglePhotos:" + } + f, err := fs.NewFs(*fstest.RemoteName) + if err == fs.ErrorNotFoundInConfigFile { + t.Skip(fmt.Sprintf("Couldn't create google photos backend - skipping tests: %v", err)) + } + require.NoError(t, err) + + // Create local Fs pointing at testfiles + localFs, err := fs.NewFs("testfiles") + require.NoError(t, err) + + t.Run("CreateAlbum", func(t *testing.T) { + albumName := "album/rclone-test-" + fstest.RandomString(24) + err = f.Mkdir(ctx, albumName) + require.NoError(t, err) + remote := albumName + "/" + fileNameAlbum + + t.Run("PutFile", func(t *testing.T) { + srcObj, err := localFs.NewObject(ctx, fileNameAlbum) + require.NoError(t, err) + in, err := srcObj.Open(ctx) + require.NoError(t, err) + dstObj, err := f.Put(ctx, in, &overrideRemoteObject{srcObj, remote}) + require.NoError(t, err) + assert.Equal(t, remote, dstObj.Remote()) + _ = in.Close() + remoteWithID := addFileID(remote, dstObj.(*Object).id) + + t.Run("ObjectFs", func(t *testing.T) { + assert.Equal(t, f, dstObj.Fs()) + }) + + t.Run("ObjectString", func(t *testing.T) { + assert.Equal(t, remote, dstObj.String()) + assert.Equal(t, "", (*Object)(nil).String()) + }) + + t.Run("ObjectHash", func(t *testing.T) { + h, err := dstObj.Hash(ctx, hash.MD5) + assert.Equal(t, "", h) + assert.Equal(t, hash.ErrUnsupported, err) + }) + + t.Run("ObjectSize", func(t *testing.T) { + assert.Equal(t, int64(-1), dstObj.Size()) + f.(*Fs).opt.ReadSize = true + defer func() { + f.(*Fs).opt.ReadSize = false + }() + size := 
dstObj.Size() + assert.True(t, size > 1000, fmt.Sprintf("Size too small %d", size)) + }) + + t.Run("ObjectSetModTime", func(t *testing.T) { + err := dstObj.SetModTime(ctx, time.Now()) + assert.Equal(t, fs.ErrorCantSetModTime, err) + }) + + t.Run("ObjectStorable", func(t *testing.T) { + assert.True(t, dstObj.Storable()) + }) + + t.Run("ObjectOpen", func(t *testing.T) { + in, err := dstObj.Open(ctx) + require.NoError(t, err) + buf, err := ioutil.ReadAll(in) + require.NoError(t, err) + require.NoError(t, in.Close()) + assert.True(t, len(buf) > 1000) + contentType := http.DetectContentType(buf[:512]) + assert.Equal(t, "image/jpeg", contentType) + }) + + t.Run("CheckFileInAlbum", func(t *testing.T) { + entries, err := f.List(ctx, albumName) + require.NoError(t, err) + assert.Equal(t, 1, len(entries)) + assert.Equal(t, remote, entries[0].Remote()) + assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String()) + }) + + // Check it is there in the date/month/year heirachy + // 2013-07-13 is the creation date of the folder + checkPresent := func(t *testing.T, objPath string) { + entries, err := f.List(ctx, objPath) + require.NoError(t, err) + found := false + for _, entry := range entries { + leaf := path.Base(entry.Remote()) + if leaf == fileNameAlbum || leaf == remoteWithID { + found = true + } + } + assert.True(t, found, fmt.Sprintf("didn't find %q in %q", fileNameAlbum, objPath)) + } + + t.Run("CheckInByYear", func(t *testing.T) { + checkPresent(t, "media/by-year/2013") + }) + + t.Run("CheckInByMonth", func(t *testing.T) { + checkPresent(t, "media/by-month/2013/2013-07") + }) + + t.Run("CheckInByDay", func(t *testing.T) { + checkPresent(t, "media/by-day/2013/2013-07-26") + }) + + t.Run("NewObject", func(t *testing.T) { + o, err := f.NewObject(ctx, remote) + require.NoError(t, err) + require.Equal(t, remote, o.Remote()) + }) + + t.Run("NewObjectWithID", func(t *testing.T) { + o, err := f.NewObject(ctx, remoteWithID) + require.NoError(t, err) + 
require.Equal(t, remoteWithID, o.Remote()) + }) + + t.Run("NewFsIsFile", func(t *testing.T) { + fNew, err := fs.NewFs(*fstest.RemoteName + remote) + assert.Equal(t, fs.ErrorIsFile, err) + leaf := path.Base(remote) + o, err := fNew.NewObject(ctx, leaf) + require.NoError(t, err) + require.Equal(t, leaf, o.Remote()) + }) + + t.Run("RemoveFileFromAlbum", func(t *testing.T) { + err = dstObj.Remove(ctx) + require.NoError(t, err) + + time.Sleep(time.Second) + + // Check album empty + entries, err := f.List(ctx, albumName) + require.NoError(t, err) + assert.Equal(t, 0, len(entries)) + }) + }) + + // remove the album + err = f.Rmdir(ctx, albumName) + require.Error(t, err) // FIXME doesn't work yet + }) + + t.Run("UploadMkdir", func(t *testing.T) { + assert.NoError(t, f.Mkdir(ctx, "upload/dir")) + assert.NoError(t, f.Mkdir(ctx, "upload/dir/subdir")) + + t.Run("List", func(t *testing.T) { + entries, err := f.List(ctx, "upload") + require.NoError(t, err) + assert.Equal(t, 1, len(entries)) + assert.Equal(t, "upload/dir", entries[0].Remote()) + + entries, err = f.List(ctx, "upload/dir") + require.NoError(t, err) + assert.Equal(t, 1, len(entries)) + assert.Equal(t, "upload/dir/subdir", entries[0].Remote()) + }) + + t.Run("Rmdir", func(t *testing.T) { + assert.NoError(t, f.Rmdir(ctx, "upload/dir/subdir")) + assert.NoError(t, f.Rmdir(ctx, "upload/dir")) + + }) + + t.Run("ListEmpty", func(t *testing.T) { + entries, err := f.List(ctx, "upload") + require.NoError(t, err) + assert.Equal(t, 0, len(entries)) + + _, err = f.List(ctx, "upload/dir") + assert.Equal(t, fs.ErrorDirNotFound, err) + }) + }) + + t.Run("Upload", func(t *testing.T) { + uploadDir := "upload/dir/subdir" + remote := path.Join(uploadDir, fileNameUpload) + + srcObj, err := localFs.NewObject(ctx, fileNameUpload) + require.NoError(t, err) + in, err := srcObj.Open(ctx) + require.NoError(t, err) + dstObj, err := f.Put(ctx, in, &overrideRemoteObject{srcObj, remote}) + require.NoError(t, err) + assert.Equal(t, remote, 
dstObj.Remote()) + _ = in.Close() + remoteWithID := addFileID(remote, dstObj.(*Object).id) + + t.Run("List", func(t *testing.T) { + entries, err := f.List(ctx, uploadDir) + require.NoError(t, err) + require.Equal(t, 1, len(entries)) + assert.Equal(t, remote, entries[0].Remote()) + assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String()) + }) + + t.Run("NewObject", func(t *testing.T) { + o, err := f.NewObject(ctx, remote) + require.NoError(t, err) + require.Equal(t, remote, o.Remote()) + }) + + t.Run("NewObjectWithID", func(t *testing.T) { + o, err := f.NewObject(ctx, remoteWithID) + require.NoError(t, err) + require.Equal(t, remoteWithID, o.Remote()) + }) + + }) + + t.Run("Name", func(t *testing.T) { + assert.Equal(t, (*fstest.RemoteName)[:len(*fstest.RemoteName)-1], f.Name()) + }) + + t.Run("Root", func(t *testing.T) { + assert.Equal(t, "", f.Root()) + }) + + t.Run("String", func(t *testing.T) { + assert.Equal(t, `Google Photos path ""`, f.String()) + }) + + t.Run("Features", func(t *testing.T) { + features := f.Features() + assert.False(t, features.CaseInsensitive) + assert.True(t, features.ReadMimeType) + }) + + t.Run("Precision", func(t *testing.T) { + assert.Equal(t, fs.ModTimeNotSupported, f.Precision()) + }) + + t.Run("Hashes", func(t *testing.T) { + assert.Equal(t, hash.Set(hash.None), f.Hashes()) + }) + +} + +func TestAddID(t *testing.T) { + assert.Equal(t, "potato {123}", addID("potato", "123")) + assert.Equal(t, "{123}", addID("", "123")) +} + +func TestFileAddID(t *testing.T) { + assert.Equal(t, "potato {123}.txt", addFileID("potato.txt", "123")) + assert.Equal(t, "potato {123}", addFileID("potato", "123")) + assert.Equal(t, "{123}", addFileID("", "123")) +} + +func TestFindID(t *testing.T) { + assert.Equal(t, "", findID("potato")) + ID := "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" + assert.Equal(t, ID, findID("potato {"+ID+"}.txt")) + ID = ID[1:] + assert.Equal(t, "", findID("potato {"+ID+"}.txt")) +} diff --git 
a/backend/googlephotos/pattern.go b/backend/googlephotos/pattern.go new file mode 100644 index 000000000..5e2e242cb --- /dev/null +++ b/backend/googlephotos/pattern.go @@ -0,0 +1,335 @@ +// Store the parsing of file patterns + +package googlephotos + +import ( + "context" + "fmt" + "path" + "regexp" + "strconv" + "strings" + "time" + + "github.com/ncw/rclone/backend/googlephotos/api" + "github.com/ncw/rclone/fs" + "github.com/pkg/errors" +) + +// lister describes the subset of the interfaces on Fs needed for the +// file pattern parsing +type lister interface { + listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error) + listAlbums(shared bool) (all *albums, err error) + listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error) + dirTime() time.Time +} + +// dirPattern describes a single directory pattern +type dirPattern struct { + re string // match for the path + match *regexp.Regexp // compiled match + canUpload bool // true if can upload here + canMkdir bool // true if can make a directory here + isFile bool // true if this is a file + isUpload bool // true if this is the upload directory + // function to turn a match into DirEntries + toEntries func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) +} + +// dirPatters is a slice of all the directory patterns +type dirPatterns []dirPattern + +// patterns describes the layout of the google photos backend file system. 
+// +// NB no trailing / on paths +var patterns = dirPatterns{ + { + re: `^$`, + toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) { + return fs.DirEntries{ + fs.NewDir(prefix+"media", f.dirTime()), + fs.NewDir(prefix+"album", f.dirTime()), + fs.NewDir(prefix+"shared-album", f.dirTime()), + fs.NewDir(prefix+"upload", f.dirTime()), + }, nil + }, + }, + { + re: `^upload(?:/(.*))?$`, + toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) { + return f.listUploads(ctx, match[0]) + }, + canUpload: true, + canMkdir: true, + isUpload: true, + }, + { + re: `^upload/(.*)$`, + isFile: true, + canUpload: true, + isUpload: true, + }, + { + re: `^media$`, + toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) { + return fs.DirEntries{ + fs.NewDir(prefix+"all", f.dirTime()), + fs.NewDir(prefix+"by-year", f.dirTime()), + fs.NewDir(prefix+"by-month", f.dirTime()), + fs.NewDir(prefix+"by-day", f.dirTime()), + }, nil + }, + }, + { + re: `^media/all$`, + toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) { + return f.listDir(ctx, prefix, api.SearchFilter{}) + }, + }, + { + re: `^media/all/([^/]+)$`, + isFile: true, + }, + { + re: `^media/by-year$`, + toEntries: years, + }, + { + re: `^media/by-year/(\d{4})$`, + toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) { + filter, err := yearMonthDayFilter(ctx, f, match) + if err != nil { + return nil, err + } + return f.listDir(ctx, prefix, filter) + }, + }, + { + re: `^media/by-year/(\d{4})/([^/]+)$`, + isFile: true, + }, + { + re: `^media/by-month$`, + toEntries: years, + }, + { + re: `^media/by-month/(\d{4})$`, + toEntries: months, + }, + { + re: `^media/by-month/\d{4}/(\d{4})-(\d{2})$`, + toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) { + 
filter, err := yearMonthDayFilter(ctx, f, match) + if err != nil { + return nil, err + } + return f.listDir(ctx, prefix, filter) + }, + }, + { + re: `^media/by-month/\d{4}/(\d{4})-(\d{2})/([^/]+)$`, + isFile: true, + }, + { + re: `^media/by-day$`, + toEntries: years, + }, + { + re: `^media/by-day/(\d{4})$`, + toEntries: days, + }, + { + re: `^media/by-day/\d{4}/(\d{4})-(\d{2})-(\d{2})$`, + toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) { + filter, err := yearMonthDayFilter(ctx, f, match) + if err != nil { + return nil, err + } + return f.listDir(ctx, prefix, filter) + }, + }, + { + re: `^media/by-day/\d{4}/(\d{4})-(\d{2})-(\d{2})/([^/]+)$`, + isFile: true, + }, + { + re: `^album$`, + toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) { + return albumsToEntries(ctx, f, false, prefix, "") + }, + }, + { + re: `^album/(.+)$`, + canMkdir: true, + toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) { + return albumsToEntries(ctx, f, false, prefix, match[1]) + + }, + }, + { + re: `^album/(.+?)/([^/]+)$`, + canUpload: true, + isFile: true, + }, + { + re: `^shared-album$`, + toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) { + return albumsToEntries(ctx, f, true, prefix, "") + }, + }, + { + re: `^shared-album/(.+)$`, + toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) { + return albumsToEntries(ctx, f, true, prefix, match[1]) + + }, + }, + { + re: `^shared-album/(.+?)/([^/]+)$`, + isFile: true, + }, +}.mustCompile() + +// mustCompile compiles the regexps in the dirPatterns +func (ds dirPatterns) mustCompile() dirPatterns { + for i := range ds { + pattern := &ds[i] + pattern.match = regexp.MustCompile(pattern.re) + } + return ds +} + +// match finds the path passed in in the 
matching structure and +// returns the parameters and a pointer to the match, or nil. +func (ds dirPatterns) match(root string, itemPath string, isFile bool) (match []string, prefix string, pattern *dirPattern) { + itemPath = strings.Trim(itemPath, "/") + absPath := path.Join(root, itemPath) + prefix = strings.Trim(absPath[len(root):], "/") + if prefix != "" { + prefix += "/" + } + for i := range ds { + pattern = &ds[i] + if pattern.isFile != isFile { + continue + } + match = pattern.match.FindStringSubmatch(absPath) + if match != nil { + return + } + } + return nil, "", nil +} + +// Return the years from 2000 to today +// FIXME make configurable? +func years(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) { + currentYear := f.dirTime().Year() + for year := 2000; year <= currentYear; year++ { + entries = append(entries, fs.NewDir(prefix+fmt.Sprint(year), f.dirTime())) + } + return entries, nil +} + +// Return the months in a given year +func months(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) { + year := match[1] + for month := 1; month <= 12; month++ { + entries = append(entries, fs.NewDir(fmt.Sprintf("%s%s-%02d", prefix, year, month), f.dirTime())) + } + return entries, nil +} + +// Return the days in a given year +func days(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) { + year := match[1] + current, err := time.Parse("2006", year) + if err != nil { + return nil, errors.Errorf("bad year %q", match[1]) + } + currentYear := current.Year() + for current.Year() == currentYear { + entries = append(entries, fs.NewDir(prefix+current.Format("2006-01-02"), f.dirTime())) + current = current.AddDate(0, 0, 1) + } + return entries, nil +} + +// This creates a search filter on year/month/day as provided +func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter, err error) { + year, err := 
strconv.Atoi(match[1]) + if err != nil || year < 1000 || year > 3000 { + return sf, errors.Errorf("bad year %q", match[1]) + } + sf = api.SearchFilter{ + Filters: &api.Filters{ + DateFilter: &api.DateFilter{ + Dates: []api.Date{ + { + Year: year, + }, + }, + }, + }, + } + if len(match) >= 3 { + month, err := strconv.Atoi(match[2]) + if err != nil || month < 1 || month > 12 { + return sf, errors.Errorf("bad month %q", match[2]) + } + sf.Filters.DateFilter.Dates[0].Month = month + } + if len(match) >= 4 { + day, err := strconv.Atoi(match[3]) + if err != nil || day < 1 || day > 31 { + return sf, errors.Errorf("bad day %q", match[3]) + } + sf.Filters.DateFilter.Dates[0].Day = day + } + return sf, nil +} + +// Turns an albumPath into entries +// +// These can either be synthetic directory entries if the album path +// is a prefix of another album, or actual files, or a combination of +// the two. +func albumsToEntries(ctx context.Context, f lister, shared bool, prefix string, albumPath string) (entries fs.DirEntries, err error) { + albums, err := f.listAlbums(shared) + if err != nil { + return nil, err + } + // Put in the directories + dirs, foundAlbumPath := albums.getDirs(albumPath) + if foundAlbumPath { + for _, dir := range dirs { + d := fs.NewDir(prefix+dir, f.dirTime()) + dirPath := path.Join(albumPath, dir) + // if this dir is an album add more special stuff + album, ok := albums.get(dirPath) + if ok { + count, err := strconv.ParseInt(album.MediaItemsCount, 10, 64) + if err != nil { + fs.Debugf(f, "Error reading media count: %v", err) + } + d.SetID(album.ID).SetItems(count) + } + entries = append(entries, d) + } + } + // if this is an album then return a filter to list it + album, foundAlbum := albums.get(albumPath) + if foundAlbum { + filter := api.SearchFilter{AlbumID: album.ID} + newEntries, err := f.listDir(ctx, prefix, filter) + if err != nil { + return nil, err + } + entries = append(entries, newEntries...) 
+ } + if !foundAlbumPath && !foundAlbum && albumPath != "" { + return nil, fs.ErrorDirNotFound + } + return entries, nil +} diff --git a/backend/googlephotos/pattern_test.go b/backend/googlephotos/pattern_test.go new file mode 100644 index 000000000..1db836a6a --- /dev/null +++ b/backend/googlephotos/pattern_test.go @@ -0,0 +1,495 @@ +package googlephotos + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/ncw/rclone/backend/googlephotos/api" + "github.com/ncw/rclone/fs" + "github.com/ncw/rclone/fs/dirtree" + "github.com/ncw/rclone/fstest" + "github.com/ncw/rclone/fstest/mockobject" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// time for directories +var startTime = fstest.Time("2019-06-24T15:53:05.999999999Z") + +// mock Fs for testing patterns +type testLister struct { + t *testing.T + albums *albums + names []string + uploaded dirtree.DirTree +} + +// newTestLister makes a mock for testing +func newTestLister(t *testing.T) *testLister { + return &testLister{ + t: t, + albums: newAlbums(), + uploaded: dirtree.New(), + } +} + +// mock listDir for testing +func (f *testLister) listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error) { + for _, name := range f.names { + entries = append(entries, mockobject.New(prefix+name)) + } + return entries, nil +} + +// mock listAlbums for testing +func (f *testLister) listAlbums(shared bool) (all *albums, err error) { + return f.albums, nil +} + +// mock listUploads for testing +func (f *testLister) listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error) { + entries, _ = f.uploaded[dir] + return entries, nil +} + +// mock dirTime for testing +func (f *testLister) dirTime() time.Time { + return startTime +} + +func TestPatternMatch(t *testing.T) { + for testNumber, test := range []struct { + // input + root string + itemPath string + isFile bool + // expected output + wantMatch []string + wantPrefix 
string + wantPattern *dirPattern + }{ + { + root: "", + itemPath: "", + isFile: false, + wantMatch: []string{""}, + wantPrefix: "", + wantPattern: &patterns[0], + }, + { + root: "", + itemPath: "", + isFile: true, + wantMatch: nil, + wantPrefix: "", + wantPattern: nil, + }, + { + root: "upload", + itemPath: "", + isFile: false, + wantMatch: []string{"upload", ""}, + wantPrefix: "", + wantPattern: &patterns[1], + }, + { + root: "upload/dir", + itemPath: "", + isFile: false, + wantMatch: []string{"upload/dir", "dir"}, + wantPrefix: "", + wantPattern: &patterns[1], + }, + { + root: "upload/file.jpg", + itemPath: "", + isFile: true, + wantMatch: []string{"upload/file.jpg", "file.jpg"}, + wantPrefix: "", + wantPattern: &patterns[2], + }, + { + root: "media", + itemPath: "", + isFile: false, + wantMatch: []string{"media"}, + wantPrefix: "", + wantPattern: &patterns[3], + }, + { + root: "", + itemPath: "media", + isFile: false, + wantMatch: []string{"media"}, + wantPrefix: "media/", + wantPattern: &patterns[3], + }, + { + root: "media/all", + itemPath: "", + isFile: false, + wantMatch: []string{"media/all"}, + wantPrefix: "", + wantPattern: &patterns[4], + }, + { + root: "media", + itemPath: "all", + isFile: false, + wantMatch: []string{"media/all"}, + wantPrefix: "all/", + wantPattern: &patterns[4], + }, + { + root: "media/all", + itemPath: "file.jpg", + isFile: true, + wantMatch: []string{"media/all/file.jpg", "file.jpg"}, + wantPrefix: "file.jpg/", + wantPattern: &patterns[5], + }, + } { + t.Run(fmt.Sprintf("#%d,root=%q,itemPath=%q,isFile=%v", testNumber, test.root, test.itemPath, test.isFile), func(t *testing.T) { + gotMatch, gotPrefix, gotPattern := patterns.match(test.root, test.itemPath, test.isFile) + assert.Equal(t, test.wantMatch, gotMatch) + assert.Equal(t, test.wantPrefix, gotPrefix) + assert.Equal(t, test.wantPattern, gotPattern) + }) + } +} + +func TestPatternMatchToEntries(t *testing.T) { + ctx := context.Background() + f := newTestLister(t) + f.names = 
[]string{"file.jpg"} + f.albums.add(&api.Album{ + ID: "1", + Title: "sub/one", + }) + f.albums.add(&api.Album{ + ID: "2", + Title: "sub", + }) + f.uploaded.AddEntry(mockobject.New("upload/file1.jpg")) + f.uploaded.AddEntry(mockobject.New("upload/dir/file2.jpg")) + + for testNumber, test := range []struct { + // input + root string + itemPath string + // expected output + wantMatch []string + wantPrefix string + remotes []string + }{ + { + root: "", + itemPath: "", + wantMatch: []string{""}, + wantPrefix: "", + remotes: []string{"media/", "album/", "shared-album/", "upload/"}, + }, + { + root: "upload", + itemPath: "", + wantMatch: []string{"upload", ""}, + wantPrefix: "", + remotes: []string{"upload/file1.jpg", "upload/dir/"}, + }, + { + root: "upload", + itemPath: "dir", + wantMatch: []string{"upload/dir", "dir"}, + wantPrefix: "dir/", + remotes: []string{"upload/dir/file2.jpg"}, + }, + { + root: "media", + itemPath: "", + wantMatch: []string{"media"}, + wantPrefix: "", + remotes: []string{"all/", "by-year/", "by-month/", "by-day/"}, + }, + { + root: "media/all", + itemPath: "", + wantMatch: []string{"media/all"}, + wantPrefix: "", + remotes: []string{"file.jpg"}, + }, + { + root: "media", + itemPath: "all", + wantMatch: []string{"media/all"}, + wantPrefix: "all/", + remotes: []string{"all/file.jpg"}, + }, + { + root: "media/by-year", + itemPath: "", + wantMatch: []string{"media/by-year"}, + wantPrefix: "", + remotes: []string{"2000/", "2001/", "2002/", "2003/"}, + }, + { + root: "media/by-year/2000", + itemPath: "", + wantMatch: []string{"media/by-year/2000", "2000"}, + wantPrefix: "", + remotes: []string{"file.jpg"}, + }, + { + root: "media/by-month", + itemPath: "", + wantMatch: []string{"media/by-month"}, + wantPrefix: "", + remotes: []string{"2000/", "2001/", "2002/", "2003/"}, + }, + { + root: "media/by-month/2001", + itemPath: "", + wantMatch: []string{"media/by-month/2001", "2001"}, + wantPrefix: "", + remotes: []string{"2001-01/", "2001-02/", "2001-03/", 
"2001-04/"}, + }, + { + root: "media/by-month/2001/2001-01", + itemPath: "", + wantMatch: []string{"media/by-month/2001/2001-01", "2001", "01"}, + wantPrefix: "", + remotes: []string{"file.jpg"}, + }, + { + root: "media/by-day", + itemPath: "", + wantMatch: []string{"media/by-day"}, + wantPrefix: "", + remotes: []string{"2000/", "2001/", "2002/", "2003/"}, + }, + { + root: "media/by-day/2001", + itemPath: "", + wantMatch: []string{"media/by-day/2001", "2001"}, + wantPrefix: "", + remotes: []string{"2001-01-01/", "2001-01-02/", "2001-01-03/", "2001-01-04/"}, + }, + { + root: "media/by-day/2001/2001-01-02", + itemPath: "", + wantMatch: []string{"media/by-day/2001/2001-01-02", "2001", "01", "02"}, + wantPrefix: "", + remotes: []string{"file.jpg"}, + }, + { + root: "album", + itemPath: "", + wantMatch: []string{"album"}, + wantPrefix: "", + remotes: []string{"sub/"}, + }, + { + root: "album/sub", + itemPath: "", + wantMatch: []string{"album/sub", "sub"}, + wantPrefix: "", + remotes: []string{"one/", "file.jpg"}, + }, + { + root: "album/sub/one", + itemPath: "", + wantMatch: []string{"album/sub/one", "sub/one"}, + wantPrefix: "", + remotes: []string{"file.jpg"}, + }, + { + root: "shared-album", + itemPath: "", + wantMatch: []string{"shared-album"}, + wantPrefix: "", + remotes: []string{"sub/"}, + }, + { + root: "shared-album/sub", + itemPath: "", + wantMatch: []string{"shared-album/sub", "sub"}, + wantPrefix: "", + remotes: []string{"one/", "file.jpg"}, + }, + { + root: "shared-album/sub/one", + itemPath: "", + wantMatch: []string{"shared-album/sub/one", "sub/one"}, + wantPrefix: "", + remotes: []string{"file.jpg"}, + }, + } { + t.Run(fmt.Sprintf("#%d,root=%q,itemPath=%q", testNumber, test.root, test.itemPath), func(t *testing.T) { + match, prefix, pattern := patterns.match(test.root, test.itemPath, false) + assert.Equal(t, test.wantMatch, match) + assert.Equal(t, test.wantPrefix, prefix) + assert.NotNil(t, pattern) + assert.NotNil(t, pattern.toEntries) + + entries, err 
:= pattern.toEntries(ctx, f, prefix, match) + assert.NoError(t, err) + var remotes = []string{} + for _, entry := range entries { + remote := entry.Remote() + if _, isDir := entry.(fs.Directory); isDir { + remote += "/" + } + remotes = append(remotes, remote) + if len(remotes) >= 4 { + break // only test first 4 entries + } + } + assert.Equal(t, test.remotes, remotes) + }) + } +} + +func TestPatternYears(t *testing.T) { + f := newTestLister(t) + entries, err := years(context.Background(), f, "potato/", nil) + require.NoError(t, err) + + year := 2000 + for _, entry := range entries { + assert.Equal(t, "potato/"+fmt.Sprint(year), entry.Remote()) + year++ + } +} + +func TestPatternMonths(t *testing.T) { + f := newTestLister(t) + entries, err := months(context.Background(), f, "potato/", []string{"", "2020"}) + require.NoError(t, err) + + assert.Equal(t, 12, len(entries)) + for i, entry := range entries { + assert.Equal(t, fmt.Sprintf("potato/2020-%02d", i+1), entry.Remote()) + } +} + +func TestPatternDays(t *testing.T) { + f := newTestLister(t) + entries, err := days(context.Background(), f, "potato/", []string{"", "2020"}) + require.NoError(t, err) + + assert.Equal(t, 366, len(entries)) + assert.Equal(t, "potato/2020-01-01", entries[0].Remote()) + assert.Equal(t, "potato/2020-12-31", entries[len(entries)-1].Remote()) +} + +func TestPatternYearMonthDayFilter(t *testing.T) { + ctx := context.Background() + f := newTestLister(t) + + // Years + sf, err := yearMonthDayFilter(ctx, f, []string{"", "2000"}) + require.NoError(t, err) + assert.Equal(t, api.SearchFilter{ + Filters: &api.Filters{ + DateFilter: &api.DateFilter{ + Dates: []api.Date{ + { + Year: 2000, + }, + }, + }, + }, + }, sf) + + _, err = yearMonthDayFilter(ctx, f, []string{"", "potato"}) + require.Error(t, err) + _, err = yearMonthDayFilter(ctx, f, []string{"", "999"}) + require.Error(t, err) + _, err = yearMonthDayFilter(ctx, f, []string{"", "4000"}) + require.Error(t, err) + + // Months + sf, err = 
yearMonthDayFilter(ctx, f, []string{"", "2000", "01"}) + require.NoError(t, err) + assert.Equal(t, api.SearchFilter{ + Filters: &api.Filters{ + DateFilter: &api.DateFilter{ + Dates: []api.Date{ + { + Month: 1, + Year: 2000, + }, + }, + }, + }, + }, sf) + + _, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "potato"}) + require.Error(t, err) + _, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "0"}) + require.Error(t, err) + _, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "13"}) + require.Error(t, err) + + // Days + sf, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "02"}) + require.NoError(t, err) + assert.Equal(t, api.SearchFilter{ + Filters: &api.Filters{ + DateFilter: &api.DateFilter{ + Dates: []api.Date{ + { + Day: 2, + Month: 1, + Year: 2000, + }, + }, + }, + }, + }, sf) + + _, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "potato"}) + require.Error(t, err) + _, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "0"}) + require.Error(t, err) + _, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "32"}) + require.Error(t, err) +} + +func TestPatternAlbumsToEntries(t *testing.T) { + f := newTestLister(t) + ctx := context.Background() + + _, err := albumsToEntries(ctx, f, false, "potato/", "sub") + assert.Equal(t, fs.ErrorDirNotFound, err) + + f.albums.add(&api.Album{ + ID: "1", + Title: "sub/one", + }) + + entries, err := albumsToEntries(ctx, f, false, "potato/", "sub") + assert.NoError(t, err) + assert.Equal(t, 1, len(entries)) + assert.Equal(t, "potato/one", entries[0].Remote()) + _, ok := entries[0].(fs.Directory) + assert.Equal(t, true, ok) + + f.albums.add(&api.Album{ + ID: "1", + Title: "sub", + }) + f.names = []string{"file.jpg"} + + entries, err = albumsToEntries(ctx, f, false, "potato/", "sub") + assert.NoError(t, err) + assert.Equal(t, 2, len(entries)) + assert.Equal(t, "potato/one", entries[0].Remote()) + _, ok = entries[0].(fs.Directory) + assert.Equal(t, true, ok) + assert.Equal(t, 
"potato/file.jpg", entries[1].Remote()) + _, ok = entries[1].(fs.Object) + assert.Equal(t, true, ok) + +} diff --git a/backend/googlephotos/testfiles/rclone-test-image1.jpg b/backend/googlephotos/testfiles/rclone-test-image1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1c44cfacd856d127ce1a0f0c49e85e07dc6c2a64 GIT binary patch literal 16552 zcmeIZcU)7=);}7W6j6#G9YRq$B%ya{q1RBPsSpB$5(uFq7O>HqfOL`Gr8lu4y-M#W z(nLU%DoXk7pgzxY&U@a^y}$eKJrj2J+B0jdS>H8lX76lfolKm308wkhwcsEE0s>GX z@By97^D?UYp&dXVEiC~M83+U-2T>6ag3bU`0B}$P^f};s2RN7sNI(R@ISM#fe$#*o z5FY{2X+0r8LjitD;G6**Fo325&PBlSB$yCH4V+&B2l^rLDStme-vNYFZ87ecU*BML z0|Zz~4TLxCl;_V`Lrc#P3=@GuM5IA52vkNEA}I@#218-8P^hc~45W`nx`0(ZZPAXx z=I-wHAcntCd;(1VgW?N7|FP;2p#NA!4Oo#9{0kizpwoZTd;rb$=odO7z#`t?bPOO% z6!Qxm1mGe5O{)PkK^(=Y#1VpsfjSAO1Sl4u@wPx9AW+;tXu_OddVnSn(Ld-j|Df@0|H<2cN5y3!VzN+4 zFjPtwCMgT~pB@DugPsD1m{=9?4aSdxm@EX(4Tea|N&p1(Kh}Y6kOKU8X*@#w`PVld z1O7_>_Zf&if?siguO|3S7Xz(?Kz!l3mjO=@{f;$!e4hCSP5+xd)k^qF&pN4N?V>ojF5#hJ=iil$4yDjDnKx9Oc=wluWc2sOZ?3IoR2l zSy{n6BD`QOA#PSyJ}G`7z;EK>9K13L(lB`uF>x4P2?04dIptYO#&hQwVVta-u>b3F z@)|@#1}Y{1z;b~IX$XjD2u@l-9Dto9z`Vsz-QR_PkcgP%3@I5o#aV!%h8piFB0^#! 
z5)xt{S_p!Gb`UWQ2`wj7`OF0)TT(789qevw78$on#T$C#!LK}GcAmlH6c-s7nV2tK z=H=rT5SNgYl9rKGRa1v+XliLAOiazpuUc3l?NJVnPH1PGm$#3vpMOBez55RyJ_-$s zi+`MunDit$~_K3&ZKqIquAX(f5rw;rzQHV z0bwg>t39FxYi=JAU~&oK!q*hmD(9b1(xyqm&uV%VRp3Yv!2Y<@_xTD(E`lhf)R#}) z_4x^itNEq`x;h@dQzJh2T{dWLE`b$xYdha^<>ip_fmO7tX%EYi7|7}%z}#1|PL|nR zR!LmQ|IMn2!z+UB`$`G4{SPQZCd}nmzqVrNwj`^?43)@crD>-3hiSrfodx?5v*wr zM(j=bILM(Q-YMd!bbGLd|t6+q&4p z&-|R+PfIh{(bw9GE~*OGSOs~LJDJ*?mrjzW%2RNmGH9?mQui$&(7cJ+;i{@WzzFp) zGEQH3V`jaQFMp(@J^p<7;t)fVg(8xL5_tkD-?*;1(&IH?W62OYZ81P=`K&1F`$6++ zk_c1iYDcN9#phr-`tAqb#4A2>na<7b9{bNp$#mGC;-W^o#rLnK-!j?i4@m6OVDdXwiNN(k z5&RVIOFxscAC+Qs@9DU4)~bbNoQyd*vuCQ-AsWd}@sqg_OYN7+ zEq_~kIoH^0<6=aX6az9vV>$sj&}`sLm6;%&H)El>&(Ua3#QiMZQqt0TadDHB=-49V zi_NrL{Lsf9EI1rE)zhQbM<^+e zP4g~LeyCw%Hk;`bdK-LBwQ7Zx`IWpJ<5%Z~*Gwf)FP2z(+d_G;h(A5|26*RiO~mTG zAS3u#f?+QU8BwmZLGkIWQDsB3wA1y31ek}CtmYC);4N?UGRkzUeq`pIrGf0WBaH8=uWmnrT)fMLMpIe)zdM$oa?^V=$`*bxvy1!beG(_cUu>QIkqcd&#wQH@2kjEtu`VOOm5%*wc znaPcrVnWatxBy4BJvO?(8J&~u;8j1#GzSjv)?5tkmbd3ewfy*|HQ(gFX|Q>gvDek7 zVn%UK)>Q7u*)QzvlCr?j323fReoMhcRk^;tVhR;WogkoLv|lwbTA-~H`^9_fC4!^P zKqR*CnOc5N%vYM)D8ju%m+Y^R?~`*6?r}hW1mD{1eT}moh>YwTU1Wf^mxN&Dzd{%) z6h}6x*1~P`%hNjVH=ZjAxUjlg7G@sZ#I5$F!D6nYK!iw^u-o9;uBXwm)j{lZao61% z&oj4~m)*P{VJ>MItGiOhRxiX_js>*pf7dX(p*dx;kc-G>@J8|@%qQ+$D>=_uA3<&v zqt(^1wCF>_>2axc}LXPS@_nE_JzrKEtq88Nai)~sdo z^@*FiWvKgFb3f?yQ{>3qFK-`Di=2SC!S_XDu+uvV#c-A$m->tTl5gnJ2pN18 zg8YMSjt3fRNPeqquvx)?Rw9m^t!!@TG}Szx2|WWJX1t-y6ZkV@G*=t@*yQ?^1hSf} z_rgVA&~BZHoc$Y30f8LtIkGtydmb8+uZ|xIurZ^#Yp!w=UVV4C!OKe=KTsY}Q!*{w zpVA`em++d=-Y~yl)3~s(!!6XvifP*!Yb{OwIp}sv=F(<3_bZ9uZwgtIOLtj4aaLwh zTvpypq;&?)?M2mYY@umxMVwKzEMH6g#A_32=O4~>7#uenDfrPBBIWNgUX<<+I1+T$ z(jr&1*KlTL3f>_aUbCdNQM=INig`m6*4G%zZTzMa6T+cg=qRV>3yuw?Fvs}!IiB|w zG2!E_y8K{i?Asesr}*g9TIa*0A{k1po#j-2#;C~*>Fl{qO=+fn!PJc9k7PX!2hn!8 zBbSPv#P%*`&ujiwkVQ`oOBc@^Qx{{NPl zK9enA$cd)El~g|tz0%bm$~IbJ^?dgMaa zjc<2eWHz-sx1|q>YnI${_5EQgml`kb$cf!4$$u%C^exe?f~0G0a{_TsW=+xjOXd8T 
zeD5k*mL38R`OzEaNx%LabiUQ+q_d@)ITR`(=f0jL&?#17FsnG)BU0>*9sS05yvm)~ z-xzd{uV+f_70R(cT=xEt8EhPj2X9=UiP6zupSk1~Q4;A|tQq&2=dGh|G-j6MiuW;< zp}sx)K1cJrJ7w1`9CR$CndEOhXfjJ$Y?oVZuyHiJ&#c+<5)OU6^JCm5i;rKSzg~O+ z7Z>&AMkwo}yQC_z+vCk{lk(=0-&(G&T=HYjiyPyp+^_q8ctdt6*Ezr((_vlrXd zwR0%IOWw~AGQlBczK)(;9cRX5lk~kn-I>wyA@+pGBF7rZzLasPHdFgQ^gq1K{Bc8K zsB)bqN}7q|C}8TrhNGziF_i5g-KU{UuGD~F^ZoE2nc>ynqrmm>pr>~RmCA=27$wNP zU7poxht|O4pKILq401sjJm2Cu$L(-Ke>Q)qUGBK%Vt$!{YZyJZkml#}(1B*OjAVnI zaq6wl@b(Px&5Dis47E#FsR?#}m@9l_6Bw1Ww$KtP73$&WheJcw<-$tb-W$H6%`$~i zv-q-lNbeWdjm@d2Sn>A=ZW-vZo`52;!`4w-4lcOduW51U093Uc+`*2+SDIx&H^?Oq z21|tA@eCYKeB(KFQ-$xfW9pLI%-vOB5!HBWtm>Yxdi=ag9GbLxM`a}%<5DTrgq~>- z6ioAd^d+ZDh2hflL0HrLw^TLWhJj;QL35+Q>=Ey)AI!bQUPQ)SX_wh4WpO`Sde8YK zTH$!VRzWZhn+8OXT~z7QTq~I$mTxh2 z#wI$GE#n~8JOhGjCvA2|551g^fdWr`3U;q&x_M{st5LxHKL<6heq05syoR>jnAv0_ z<(Wx{@{Ia*j`$}c1~(SM3+Qs)&1B>A`g&2NB&U5 zkxnc1j3!bf*>;j;tmJz#XM@uR1BjT|hcsmHIs1+rG9#%58PM0;KSn3X3Iexgb~tnB z+a7$761t-M^~#0~T|k@y1X-TW=<5eDZ+t(diU!NcmAzxr$nX1L4A|Ajhp|*E! 
zne6{CPyY;Y9xlqC#CVX&Y@L_;X*QE^G0YtPJY;Eb#v!C+1|1NtM;?#XE3P1-_B^j2 zh_y_b`q13CEkOlcNJy{=yHAW{tB4Zh(5gCy1^JLo1b?@&q0SYzeB1arv}DCpx+Jd`_I-M3vSp8I`GB zcy6&yvd!&TD(BWM-M!NAvwBFQ$jI3dF*p?q9OAcQ^x4Ju9M@DQ$7OKx2?!m%{ljc9 zec-ZJX;#N0dLg&t0Y|LsSFPoSWKGJaaaa2NSIw#iXvKZ&Xmr1Hwh?2C$`*GA#u^KS z>@v=1UEn5dpl|!Umzbir`?j2Ib-YNMJ#(`djLv&H?wye%A z(^$CT-ke;3uf(~%X!nBNycTxC?L_3-YNnBeNg>nfyFL+9AFtZ|?U|M`)(x3e#d!BdfHx$TDKoosVH?H2; zD-_?6dmP)YR2n#F-NnzIb1fx#DlhwHJX=MBu~O5zLQ#5i?>NQH)*G`j97oP;4#A{E zo>@Q2oKq*>rnORY*ta>X33#G8i$cAGoO&p=&&n2WTq|A2Sq^@7X3F-zK=6q8IjcGE zp3Z|zLdOU~e^TJLt>OEr=IV1|&b0O*Y{<-!Gm6CM%Xtb%M;w;*J--))hD}RV4HXv| z9lr%^Hg#gmEY7~U+OM}fZqc2W=LV6b*$m)s ztNR(}Mn0wqKl@_1uwq=;aZAc9I&uGQxj3q#UKCtc$#9OxX}v^6?2f*iIUDx2uVN8< z`yAZCSEwTLU|@c?tmysQ__J2_FM|4*F1;;qrCb>wssj(&J|~wq`x^I_Gf@ID3}8ku zZNG9Dy_f6jfMdTS65<+VbsqbNqB8Vb43B9p@PdF!>{YGVy7!b2haKRfXV#ro z(jB(P^tN|ncFK@%8O*6QE;Vm=4$)eRHDII}H#cq6UKAXKCj`chA?`G0k`rZ-5ADDBN44}Q`ee!}TJ)f@gCbmfk3WmN-(Gm`S zr;&5z1k_kx2ZJ&ay2b@!Qx?nCH|!clW^eq|>4Q3ubqM%gm|pu~u8MwkO&jT7LOUhr zmM8O;o8^wuqr2TZq-jT+&K<+i6E(BiI(L`bhbyLLNriWJ=_5YFyLwbddOjBRYo#^+ z%rV=#G)8An7ur9>AWwzZ-7E8KkwhnL1egqK#K-A?S6_Fs*Npjf|I{D3rsHQUT}DA- z%)MyaCI2CCOzyfhy2yBWZ14vN^$*hv(#Tgq^1QLiH6<@V8Vt_t$a|NKsQNuAYRn=S3KmQhR~MKl_b zUhE{-l=_loL_*la@~KFL+8nyow*K6J^!}Pn+BzFmGS}vt*YeB@H^qvqrfts3-Qd~#7~!8tIhl6wYxI8th%((&Xr1RBTWmZ#h+Tzk`jwf+|826yM}_sB^AS8zwUcxyJ* zY)!{kn_3A=Uz+FCJeb&JPoP`soMOHFF-4-fSyo59BgJZ&H%~6e#i1u4jxAJiWmV1a zgYTNY2U0GHy9U{3G?1Q6$}i=)9$y64bC?;pOk7wm^1%=Da`a-h?~=-rRsQ(U^I{DLk+WJv(__K!Sj=M2_CB8!fGYR)X1V{itapJNm=lAr|Y5&;|iNJ zs!D^uWmEVpr+^X}4A+}rC&FfGx0f>E+FjtUIQ!49rjMl;UXkF7)##nB7=Nxf8ffy| z>ahDplo{|Y`04%tVz4j2k3BQ!w%(@-ykQzIe5CBQbKAZA^waCQTlCkl@eYWB15Ie15 z-$e4YHS4juteHFZf%*0n1U!Mji-WL_X31GM&ND{rf2^o+cz{^!lMijs=+60ZBw!;j zB2w-Z7=y@L--V0I=0yq-bMPx-UTP`4Oo+{$)B{t5*TWL7X;_byWzFm8!6vP{JfsvA z9T5{hsGQ$N|0LC|)ja$NlGbqQvg^pma?v)#Fg#e$|NL!cDGrf!J^Atp$g-Y=pxuC& zGUA6hBF8Vpa3q;|u4_;Vv@W6n%BX)mpn8?LIraJ+(>h{9@#k8}FG!rBEDO>xz2uy(qq33LsN|FD`<;hqSxzc*(qF_NBs~Rkjx$8wG 
z?aDGYII3oGV4vRY-N)}do{|i)NM5{RHA+lGj{0<}G8WrPtDoWrxC7yCoe)Tt|};<|=y;aOeqW zOrpp2jH&9om~%eBRZjV1>Di8zHq_@7@fgXi&&nRMKZ68s3VVNT9?WI3(s-AoHz;0Q z?ApB_Y&(v&Y1Wr>DAjVv8?!&z_Q3^!}_JdNtjUlIb3mr;PImLu( zhn(r=$u<*z^%SqyUVV#=KruwW=2$bj?I3tvwI*t&4l!Vm z>-=c1D1RURzPlbrX%+otKI?%%O_G3sl4W$yrdh95v?V+`?BGWXnjK4+Mq?JMQ;OiF zr7HE?)0ndBJb%D8_+qvG@nPt>tzL@# zh3z8Qta=>{+>$-P+M4Cj>e~L@g5!4{4+YH~-nB}wh;>OYI>eghLw7;`{`-~}+$K0$m@C>|bPHEoM zn{jl`R?)3mdM&Ui+%LP1kV|VKCuAgJ_x*QcmT8v@IFB2A6RsL*^E!^DcGm{D)-s(V zM8>=rG0mjk&Gl|LG?=CzKFL|~SJ1{PEckE-M&YOb2s3d=<2%+NsR-joZ)&}oL z*%T{#|lK#pI8GiyA&rOtLlX?3kA`aU7-t zSWY(GX5rh=hu|}av1CrR0>O|riMy-UB>L6bt@ZBlFu%@Nq&>5_WOZS&aP2C?#VvQ@ zGpk2ayB@>Q{F5r$?S1z;pJySqBQg|gg35;c?s;-#z9&jqDWQz*J`& zBtQ+LDn+kvNC_qx|9tj3)bkAKiW#x32HR(a`pdb9*qw4&97c~-U$2s%+{EGf3t2?( zoz|QT53$`mG0K<#6Pfv8`NU4(WrW%gubtcRgD3Xv)vc5PCm;dr!;BM9g1N#%fsB(H zcaI}I8s52Truew*mWo28*ZktsFLbl%-L$hLF@&>6MZJ2<1R)uM=Z})@J9j0rbM-X| z8V)qJ%I#`Q1)MdqWN^x2YOUCm7khRbiEx#U7*%Z8ox^U$1fi|kTes&}g9M|`siX(Q zc~0~{M#NrQ=#7FdrPbbkm?4nWq>5tlr_*J*q($)X+WQ1$tk4jgC-{g<<_!z|t90C} z)jH<276)67p2V#O(DvKuE<&W2>)wqHIe+qLDAP{TqU}~G>FOVChnjmed*>nanbz36$_0aO@S6>Yt3RLg~j5E&;4C5Cwc8|P4U%^>MSgK;}z_d?M_|xkjz`Yu0g%Gc0fA`>elSxk9 zn-6A#j|fb8!ZRL}wz{#KmdecukmX>=e&#FIBsuZOMraNxiDak!2%D@G6uPhLoaQ)I zy;Dv_E;=-yt{nj(A^4=x9LN}vo3u%vk63B&OWDNV|LB z4));m=kaa+-rsO#B?Gdh8Va3{=vkti-@>s*Fy_T7s9&cdqh#qNctC!m+kV^aJD z^kJ;b9&WJVDsC$^#Nh4Cl-o&*kqfKC^xzmwe*HF=YXI}7$1CsZWH`grLemge1Pm-) zSso`SR3KzPUM2G-5phkNG4>|vKH8NTVR9ocE{r`SSb+W*F4ddJr&&@-vrB`kUx^RB z;QLabRWI$EwTBIPO)D>9YUJyYT&i5KieM3+Fp}%@crS1C_4)j()76+W=RrGrrQY|H zY!6J#wQHX&u&pw?M(6)tmk*Rr27z?#`(_MGcx)Y zt!|KgOfj*$m(?^3Y!ES_eaiSTzhH6IqM6aUjC!-!upGIns~?=FU-Ri*Slq~`sDjXU zJnuJY^csx0JM(9PmU82dRE|j0wVf~cTq+h5W_vN?*lb1N!NOKVb6kC-XKhfgnDBVX zONO41B(sMovgws&6=7`JZqfdAzaW9Fpc7C_L!3FxDv=V2!5Tu(0{a%0lIAVOl>%x5 zIJ@H1Ci?|5#&^j_^{Vcf#6BW+lqTk|oUZ+RnkH3S8zAdJsJiRg@E2Gr$vt2+%h{2Dl()?75T_!Sa5xer_IaC@)*EpPMTNC+nxc zb*fwzpz+nBT;Nj{FBb)_tH7o=Rd-JmSX@M01R@M<)boLH0XjVG9b}Ew;J-BhR|;Ie 
zP4)Hl74a1larbl-h04gth(chZFqkmFA&m3Kc-i_1V{qJmG^n9)NKdqf7up>I#%r{- zbNBXA-~#$TH4JD$s`~*Izg6R}PZi_5>_xSJ3vmDlz#s~PK%|8sFkzSs-u7Rh+&uL3 z{*wHU{&REtiyP;q?(=s)|B)MK;_rbHHAdmwy*-gAbsrSQi~CP!93Z63j}%3@qTGPaaezkX zU%V(_2jySf|6)JB z6u9uHWbNINXnWaTq=T3}L>vYchS=IV2#ZTPNDE5?yVr!pVGs!^dkJx*jF`k9Jz5x? zmn{Z~!s`Jvi=Y7|_I5}qq!mxH^fn*x_E8sqKvyVeBlhC+DR;#~xlkc3LXfdB76AYu|?Qj)(p z%ut>m$yC27zIS(A9Q6ozZWc7UBFZLMeVo# z-|MA{t)nPFivI6h|K5Uda`(mjKS=-K`6H|B>E-M0>0;n%VCRfNdi^ib|C8&FyfJVq z!FhT5YuW!VqW{A1Ytm~167HV~G2y4L)62Wo?mocPMb-Y<*C`6#Z3+bh5=b z;=K>7V!vw8|6$T2rETrRkuW=9V1@&eUfNzg4h6-4JPPc4>`A(-R{~tbIClp|eAOtE4#NX+d;O{=a?8YzFz&iFfB>YM) ztA(GW2yYJ$S2W7=Z=9#H|B>PE%HPrRzY6``ek!Bt?%@xtT~1yGzL@{q)&Jl)mD5E7 za|P%AAC3D*8?f-4YRCPy2bj5k4;FCilm+GrFfsjvfep8|-mYF;ivPB;U0xLLD_1li zbxQqGAo@?er}h81xBp=U9vJZCU(X+k;QyBYdf>kv_^${4>w*7z;J+UD|DOl`euII+ z01tw`!25}l4Uo0rpP#5;F%bwzT186_pH2jnE35>TAR?Aa8mxp~AgX_RGGQf@0+9hv z@-h%fm^3Rs^$JAHN+=IH3xz?U5-?%>=`_cR?mQJO)j2v^YFY+*I!1OFCp#-EyMmAi z4@~Qdu8zhPxVoVw&d$)(%}gC`{|M#g8+bSPuKtb4q==wI+^xGePjjs385r1^+2uGn zVMdql|I)azc56Pk#b(f=*iSX;-Qsdm;iL&~7n%2(NxCVK*zTmC(yPtsJ6CFpxh6lz*ZnAf%z? zq=piKVai+pE;7Io1SEteVINodnJ@>Xm0=9%#YQ>kv-=lRFI0v->$29Y4ecJ>C7ac& zmUHt;xRAhvdK(!fxfrM2=5%}5S<{nskij{?d0j)rE$aMN-5#d8LY7x7W^>s$c@(6J zjN>IyE}T`JJ_q4>iix8*pA!&I>jsYz<4kkW1HR2C0g6hOw$Eqg1Gci$0Lg-YsegGM z$l#-<0TV)qfMmB*O92>41ktQA?^G!G=?J&x=>!DZJ4}S+v+Uj*A|Dx6uz8(HQDxx0 z0?E7I232Ri4JhRVh8@5U0{;FK0%{sUPFk=s1Zrf9J=IHy*Q*njh6-o7n7k*>gN>-D z5aDHnH`6|=l{8=MplPdr8TqJFJ+keS`B!n7%C%x_<$6~+1+(MS#Wp4fp&p6(!_Pat z$oP9uUmd#N3%P)XkFc`fnNgwfcqypP7q)S7z#1sx`MU+JrHM<2+OXA zExur-+@}jk*b=W%DwxmJ4yM?UBvi90#t}#)Z^HJfYnZF&q}iz_uKBK>;DZ)S4P>R^ z#~3KTKoWpxIHAob! 
z9f&lNG}_S{&T&0oBPeleK8zZj?arUrS#8~Z`LkVTk%(7RETl2v&P;==@jZg|FJ~u- zEVV{j9#4aS?Rkj8_;ZyWj(ZnAz7D0$W1?#9sG@Xc3hueavDhf^`HE+7ILYEGz;GWgdZ;bV|`=oOi`(g#Tuur;$dr8B2h2C?7CO!-z}8hHNGB<89^Prnq& zUCF{uRj76Hy{Q ztV#3O^~SJP5hB^^Ld>dU^Cl*Rlw%ZpMEL)k{pI_=&r6-$)4j=;n00C2yOW z;J3)&XK>5%nR@M{5v$|HD~?%Qh88y=k6|oL26Ngn+#ksV6VjFDx>c-Ha+CrQgW2fZ zm5I;O>|(bL4}UU|z^5Av(5^)3e$mNiO%WPMo=A~$gNb=77KzT+BJ|%mkS>+?&1MUF z-cCsE;WPJG7p~L zHDpu#5$r=S_M?|G+@Wf{FeB0Ig>Le7f9j#0UVFWW%h$CsO3X&RgID$Z9??^sx&Or% z#Jv!-#y6y`X(>XGfSopjTIRJ{9dj8%SCy!*2V48cWEYDU-@QI&d8LzrBq>*?lv@Lw z5TYvGe_=vZ8^L~Egbp^SH?KU4@lZlg$AX+^wT`dp+0Rbo( z_yL{F2{2vqM>&H)y1K$3au5ha0iq!w1d#$%AaK$GG&OJ!0ZtYI5)c7!j{#2B-!vcs zBuGGXT22ViPykN_+_Qia2G9(^y%abfg%W~jf%{9~MBOJo#rFer2*9KP$9Q6XUBQ=( z%)qjmAiQd)m_K)IT?1nv!423B`AqrA*AVU1B>jHs*Knee#3G;u+0jfYm|DZ|#LF4QGleYnnN-01j6`(R; zsH_4^Mgao*A07ov2IT;!q+|_n1><``QUQWTgCTMX(f|Sdk7b~1WB?w|jkgeg|8>RN zfPYf@eFw%K!LM4yGh7DFwp|A)uh3pgKdvL`}^E<6`52{a+s^ zuR(O=pfUo$SZ)v@9RU#?!AUEK6VQ_ch+BN<{yqo@iHJ!^$;c@v&j5g0TD+%-2#JYE zNQi;aLJ$npgNW%!=((V3q~}cFWZYN=*zNdSavt@nH;kr(UwI`Ry+bJ|&tG6-X5r%( z5EK%Yl9rK`lULBtyriYAqpN3TZeeL(E)Z55)%^_uZ_*c;r$>&Z z>&~1}py+qQ>G@3A_c62N%e`=PFFF;&6tmk;5^x8%;_Di47paoaC6R+beWwF*W;}?k% z4@*~H7_01W6$nn+_qDWj8CsUf>V;!8*f7WDlYAVt%T@^{e)6TRQoBVY0z;4Pi+vxR z=mc3&Qz@4AK5opqYfAnpK5(=i6lUJ_=5@i9u>2Oi-fHp=>7*MImM0+PG7H*<*QF`v zMc=%en`HZk$G!!`OjnYu)94@i2tTva6vtZR zV+^jOiBf$kh50_oE-`4gbfHR$=u6bgNi+)x*f6_9!TU{DYr%FsoM*m5=UnYoJ9vHk z;b$Q(o}7v-4%C(Q((@W(wRXY26s{HyXXR3qXbP3xXp9=|jxPBX6X@K)>~PmSJHUwc zFfmP^dt+(8T%>fQsyF_0`25g?CL0w5D;44d^kn0z&T@~>fP?LY@M)U?dfUgPG2aiG zS5w59!&f^h;5MH_6&btl`4TT*SIkB?dwT6ZB_r48$ic;ob}31r%}{yC=05b=q&Z@G ztAcTr?fp9Kf>0fmEr>wYC4Y2!ct?pSdBKf#))2YrVX1xV%$w$0{ej7S+RXmP>QT6U zsF@Ju2PLY*;NG~Kwd?TJcbTMV`!Q)IcXGx}b60I_6XdPH**#Nr&ans%%AYJHSX%#d z9;I7)O9iGr8|R~PWiKGow5O901ImU2 zzBo)PCJue-!Ga?(xAgj*U2sXtZr%o}=;#C0d%`d3bRwJ8I@#qb0unU5FS!mr^rl%f z$g(npT3v99$*z#GyY|uRmhg&)T=djivEBp-C56CK<}&X~!na?_a$2-G43@{iI$tLc z$SwF)xTX6qUaqDK!FE+rJ`%Es{(|j)SXhbs+8#`UuBi(7$k|<~flhx-)WC>ZAEBZ; 
zwkSMD^|6+n#d4-o^ld1$M$IxC%PS>CrmyJ6*UaTmAJ%wAc!?5NJb;mB1H5y%CT{mZ zgb92sePJ&b5%olMgL37qNp)khoa@!3B$$_~g3cmI&`sYx8Mw@4tdfl&A z&8X}tSSTK${UhEkstF&RfM!dSwv^p8)EXM9rjXIJNy6GD`!y4z#d`YjUwpS-nsK%n ziN}{b)-39Y`$|_AL%4V7miIOKLu$dn9Zu+v(3_jRuW|MR(b0XQ3m2g6 zDkGaTYmxAxCmEf0pHr6yo?G3mjIfGr;?exlXfs=0EKa0A*ll!W*V|;t?jU};tm}5I zH|Z^wB@f>R7(QLoOYT(h&*tN8#{ydoziV4w)0r}#FEGoy;END4vzoYbrTi>cLllKw zoNiag;ySvpq&U*;ZanODjy<6^&Ni+2Axm_AYA;R5V;xAj`~Vr_VoG4&j3oINd$!8C zhUCrNO5|PL*&mFCX^Iq{{M*OV;wK;;@Lh>G?DURunHFo0Tf_MPnKuj>gctmj6=@Jx zf&+qYj0c%&%Y3VDbXdlKmZOf)b`Cf7n`)oVgp+CwGhI{T4f>fiTA+u0Xnyr#5_xUz z2eHyGD38u$uKtatz#z`{e1-h;J@<_%R>u#8*;!CLwbnd@*6$8C1O$i^2c87hmQRcI zr?rUqC%tBJGA=6KG%YFV@CY}tW8Ow%?d2#w2j6PRUfhi2c_khCO*xlp@iv<`&dySn z+s>DntlkLSUi!>~Jv_sslq-gw^=pN{R9!Ot-2K@OqvK{1Wq-yJgwk!M^K$)xM5VkWcX@@6}A-MsTH+)R1HojhLz9EoW4GseG3k?T6P=(=J*tjjj)n-t&o znS35YK{WlX{Mp0si(UP}ZVl-=)oC+|U4qxXzvd}aNoo&JZ6-eJExatQlV|aGWVog$ zY2aY#xw8166W;@SLWNkI{cc2jJ4{P>nURJi+N2Aja@C6EwgZgMo(XC<>mltQ)^Z^=XYv~|>j&eE1bvgowD)Pla=`7!R0D=4Jg-yk)Q zONjY$Eu8JaZ8CL*?eS)hNhK?pZ!Ol#eEuAT31gh~T-#?EzBUDJk4qU8bvX5Q_F}ub zb`FIFDEcpiO>jzDt)nJa$5}9WBz-TCAv3zyiM=5Th_UD7Un;pZnrQ>>2OQpF`LrQD zRJ~3YBgf2n6gYKn!^Ogx7|MR1VPz_)h9!ZOwts- zZjbBr!fRnlPqlA(2fLXWJ>BA^=5fAe_`YbdUGcc~d{L#5djunosLtoJ(1B)@yiB8` zY5L92TJ2dKFK?4f!{`Cf1cf5jw&RL&;z9woAwv_NX5eDW3j=KcqTq-G>PG#3+=A~6c z49}L@-H@rjbXFX>djhgn{hqdJjID7ohwjKUKD=SUHkR)h$tBk!TY%J8+ShR~F~Y_u z981p>glX#9IfsIKVWZS6Y3&xlX1iqR3{7 z)FC#TJ?kLeDhq;ZCu{aZ4ZWO;g92ZD%8svRx&_|f)uhx4_#E84`e_xc{uEdV5 z$Lje+me095?9I0?>Vs&oW*Stw98MD1d)O;twy?d7;_dwOq^K_G)|$KVlSE!1;f|uA z#v}b!+8G^$cq)97b*%h*Dp#ZHMDF8gbtB<{*h{>RPn%cIG4 ztbPRU2%Ut||) z2Vpm~;zg+B4`aB0>h=HB?cUiOs=I8g6;)}DJ^K83c&eo>D2Vg88FjBX&wp2{NKsKT zHcTfd^~<^SYMS&#(Q+~~hpfol+S(8fYbry3u@&LjEYYR7bt3HYW^nMD2ai)M#qAZh ztQVOZLHr2Y>+jm0*CbUqoTD_9cb~6-vkRyVd+c8xTVJ0b5vQ2vSe~lGq?_G?RYTz+ zH_i8dSY>{OpodF~CNW;*@>^#We_GBYosY26dK$JkIO7~vK7$I3G@wXC8I)BK(R!aX z48q!`Onq#AzAa4yoli<~h`39PV6Tc1;naPWJi8M)F=>T?ye{e29D 
zy%Oq$dYLwl$LU;KH!ts1M4mA~m_$dvACc6eVby~8@0fgcGo|L9>g41Hr=EaNvD-f^ z2QvrweJXN09x#e}91pl)-M{KCHKyuNh&w}A=e4Bv-OYE$3@_OlvRs#b+ATgX$O*~kz*#Tb(W1v!X5rR_NI8?+ zU5qz}gz|>FHkrGZHLe$P^HGM~ZVz6AC^rvuGFuoqY}~eH83=c|vl%X~)Eu(tXsR&g z_C0w@QTB!&6UPPg!p=ro#CB5i`+-N z#o%Bf5sGZ6kz_>DW0}iS^5)D(pLVxfZybj^ZE6Lg>K~qfgn}!YZn0sfsxep3tuS7I zH4abrau`!ARp`4dSNhK@8(5m()4!WAO~DYwC@Xtwy!mQeZ7GSLso~8H1F6D;7#*!G z7oPGOH6?Hs0XBZRzBgZmGe?yU8rd;Ak-1*Vinm`1Z17@1}ca@{*CS2l=_-SZA{+E6Ax0a5lfU9-Nn zS0c5e_%Oa*wIXQHzDtNB|4LfwRAJtYME0siQ`M$*<P@%OAWE`MUnY+lOw$+tRfxPeT_g9Ny~`T{kt>w3AmWQd`n6+8BppGql5 z`>d9;pJ-L|!NA;ZW$A~viD&GbUIh0s^Sv#0r&=B#ss|6kpHe7Ueoc7Gl`L&G4A^YO zy#2~~^iF}FGmhh2&FIzEy5ZUIPqE9w33eO(CQToGJ~f3ON{g;RWWPtah?{k>IW1SB zM0pZ|^@ySZf*Se*!z>QfN(KrX=w50a5E0YU%JW(j01E^(lCSD4*L|l%IUNBXJ+|+( zlk0#VGs17j?NlP(Ua+Fo=4;;W9HO_EY{bYhZEiYf!ixDLtv@a;4L)^k(Y%>-yL4j0 zenyRNzP*4#&|&X~OT!N1yP1rZzIOzP2Uc9=11Mjm>xIG9-Z|?6$!*diBH?e-bj2dy zY3E-&0X=W1he4SL-4lYaX$zI>8;*@5@2~yT?}Iv%cL@8Pn_l~2rGa{UMGp~BPCupS zQ7HeGhc!g?!R_uHvW%lmbjNV)MD2S${o70J!&Ou7$;5Vd8KXXHb@gbB^n5Dm*Uf1D znQytpH^$(^5Z*s@L5ap}cdydBMFy3!5okWFotU5xUVYukQ9I_}{nK#dioU<8TqPxm zDbE7DOX*|KnBrA?RH^CG$j8H(P8xPam!W(42`KwL)8P@Y9dmi7jwL>uQTIW{2i; ztlWd8ZB`8{j|_taLFy9~SqjX|by1y9oMl&rn!z}kOH@X+!U==aJ`0$1_GxcTZd z)ox8EKC`eBljED?(m9yeEtXV-{sw2&ANuW?M*v+{oFo8W>WqDQ8 z_@m#Np%+3ig{KzLXEKnPMZ% zr+B}IPGot6YEK-BoQ4|TlfSp0u?wA>yiIUl9T!n2O|D5EGgBIpvz#{X>&ehX6~P@b zX;PD+^_E@vv!XImd@#~rf`bT~t=C?`jB9t(dd1a$b~SS>v*e<*V7zwkbk+D%mC+#c zr*?{OX(0j{rDdNfGT+{OiwoMf~kAvrdftKRf_mTr9f&9_w_00{T zB6n#RtKu$j8xlr+nSW*Yu&Xa}$~|I?xiHBF(I7}fxGS3(0 zyU`;^%uJyalr}3H|5WvbYJ9nntm4eNBkYhCJDU^;$;dmC_K~gB9Bs?={N7#m;b&ov zy2F0S6l-htWA(W+Ax=R>PLu?^LBPU6#3#$t+-qld4_rRe$+Y4*I{eKCjNyz5JTH;qWj6o0DrH_UeCur1J1 zdtNTR*r)cC=yfIXK?aXo#_?c2!hWUrgAYG+pi{(f+$f?;M`xb+1mp_>*1v_dJ{2UW z*_%BPs<6peS}4~tl0Mf=dU+&(dbXxO#fGVZ8OvqYSd##<9iL>oCeW9*p5;Q)V_ZK-;BL7Ol8F8jGP&UC&++DNbP1gaxs5 zy{M*NUg81A)GiF{GkU!H^qtpR<^miOfhYkMAhbO=HmZrnvxiD&olZOlx) 
z*?>_2`oUgl(Z1G)?gku{UF?^++ZRPD-!4_iZP3@oEjkgbt=S%}uI=A0K7QwQU&PA!U8^*!WS2CPbG$_nbQcs5u)kZj ze}3z2(ScL0QjFvW=~=fQnp5S1U>{3M;!R#E+zL$vVs*-*Jr+gL;Gg<1?nbqdchGGs zDyyd6tRrf;ipMj#wVAO@oUK!wC z%SK0ukNGfRn#sOf8C-L2w8%Vsnjf0uqQrqaFfH+P>0_rYoYHzxP4P|zLc{Z|Sf@m? zO{*hqduefGG5uveEXW-8iZGLBBQSd86%k|qAZzvcgan!8uO%Fl&6i~4y6a7b62j@N zAk*eq*o9WU&VvKBSiR=_&jTndd(A;u<|`jW;t6QHAX$-J{_U5j1ZeN0aOz0po0SFW7%$2Hc(*N$QvZ_ zmRqD`ejdYS$=mSzU{bTOR4(>nk+3%D+pAZk`!(C`4es!=yv|ynC*54MJGW4>W_{uO zmM1ak>e1A$*Kn-Rq`F>v-<{55JMN&+EK7JkUO-i(^s1~9ycfR^U z!K^o=H9yNsa<@>DDlX7mer{MPxf587&>RwQ^fjac!r959O z@2bhuRh!{d02Top9;G^U?n>ts z80ru-9%yepajdlvMr-HF`_8d8rwbVP}G_VmNhutbkqFZ%0Z_Tm=i^Ni^ zX9gyCPxL=Di@!488v|X;sJnGPOE|Yl1IZk~aG8})m*D=D4@qiR(IG9~&?9d7H>`}W zGI6g~>si)XoZ*~3$y@iJ?YA=BM9KK;-;EBTSFSf!>ZRz?cdM6o^^dkgt$dn&3lJ9` zO^|5Fj82DbntVQHjB${EqB2_n?s2Ev@}l{9WprS5Pa)m#>9pudRQD5bm=`$f<-}5L zkttiolU!`Qgot*bhH^Rfyf?oys3f{3)_&WhjuR&cJ{O-BI$-GLWE zMz-#(50jLu%;Z5n<#Xjx2~AvaPUe@c>s4n(D2}{1H+G+39{OXr!eF9^Zc#nMF+=Op z#l-M)elLYv4KlvjdpS_lwh9oYN538^peX>Wi_<6+Rg2M zo7Q5IBqXK(!sbyAOBUSX1mwbW=*H7pt>Y0=b@y@R0;TO)R#u}Txm5?=LZPRS-23uv z28`umq2IJRMv9-9T7Or#u5VW5$o&a-lOvx>SAfzPIt`* z=Z1F*Cn_hIf?N(P9-Lh_^Jt&!1l0R6#2!hNKSGrOEaaBXF%s<-zR6Nb!M>P`;+NU8 zw=$WV&Q{>%joMHqINn7R{rK>m8smBb!rpqX+W&mbyIVp-FPq{8+u2=ixjwT^(<1ct zjJ)A_yKCg1(##$28XVFaXVnO1O`Vq+`J@mQjd}Wei>@Q|qn(ZjPSlLVIj-MSF*c&yd zBt2a8k!K_%$?hSFZhB=~Ll~d2Te^SMKUjDx_yp9_m|z97OQu3#u*T3cz`ljWlsTJm z)xf$yuC4^l$$pWn@m-2hgPJ?$@ehby>xVHBJpJ`=oEIr=lj#Fh}FFiNd)7*Gl49yEavT@) ztoI8;$1zKSYLore&E0!O7@1FbmeaTs5a_s8_-Ly11oUnD^ub8eqK`CjwyaBx+9K^9 zb@s_5urrQ!$JNKjOF=>cgA<24c{(D+5uP3r{%|h|s5nFdq@?Qa1xL6ceZY=L7ZgUB zd$Z{kHyGul%xxuY05R~=K)RxI0=rG*d$?n8 z3jWI6r@|Eg8ec5I4L*hVxG8g61DoD7JiU=%DRC)rh#0U@?>dYdkm2p*tYE6C^;-h) zq|E(WRX;yJaX(3MPj43qsJy(q1Oz4lgNXqMFFcA+4YYr%7*K<_wmZNAJI7T057D3DH7-D>y1EOx{k#7@ce1a%iS0E&$fJV zNPN+$uI^6G5`dbgJ@yBW-%I!x9`7(0l!wDI+H1j6}*JAP6bAjLaW7 zx)_`f9D_jO83+<71%-*hC8d#KQV=aG zeqy*Xv>B0@hcb0;h7t-6u*BfaGclSjq zb4$WxW#k|dGSbqLfOk$2m;AjvaR5Rc1;_nHK_sC6R%zhr1aJeI{|k$;x2KCY954Xz 
z)W1~VjR5qQfXe(~(Q=35T!ErLt!jB9;O>`^7#AO)ROasnwT;cx+XZkUmInpzN2{fZtA)GeaOTNDa6b z-W~Y(`&+n~tFI5haVqV%3#5SVhsphmqpVy}K1g-AJANdc^8Pit6*S>KNDXf!a0TW| z{8myih^!b)#{AD#QgH|bVtYE86i#8D-sYa3?#kSM?F0T*_s0?leop&6VJTb&JcXas ze#`$oU#i1hBmh$4e@Fd$jhU;bALjpo`w!+1ubQ`ypQpE*k++c}8j0}vU%3A#>JPsu z@G8Oicn9b@{V%Nl0{9j5Isk{KcffB3GbHvm<&FZM&a4V>1l}FW+&K7kBoJD^3K6bw zj0@iTz%2Hw1oa<+Ue*zbL^wj^#UOBe(91%d#pLCIV3mYR0y|%!P+55>FqD7s<2;>x z{NUb5H5Xt80=f#Ae@?e-g9T5+Rp<}S&lL$MBLaac0ORkpPw;P_UwY#wYhWJx+a&x< zuAqw#QZrvKFLxBu`)@l>dH*Be@50}s=YM7TyZ)3%!_zAOn7dqkjQlYFxvBqwIOV&H z0%8T{`5%@0M;$QnoJz<2)(42(zdH+fbt(X{0tBYN7_i|M?(6Qut@3Xh+m$5nzH&za zT&L780TTa|ds_aFcl#eG@WFs5|N8z=0sr^-uLu6?f&Y5ozaIFn2mb4U|NnX5?-dLr z2KW&41J)-_HbC~qfBr-TONv84a_YJU_;e!RxX4Ce3nF6W(`F;|0nz;1mkAr8EQlQV zl9z|bz~tEQsaGIkHbN!P87K@2k&+X`-%fL^XwOicrJ|%gOMRA}j`kdrAPdv^^Gr}4 zK6XI`No6H@NjcezI`)PaHLWhm${Bl@SY35=adlBKzy^3Z`P(C15T`j-boBI0j7$NaI&#}?~IS~;6=|z8XteEi$W(0rbR{G98$oc3Agr zYMMds%0OxS^8&fJ73jA<=|SJWbD`45X0ROUxH zYcXf1Ti_a0q9v(f=WJphGrJ}$Io>3 zV6Qaq7qlfu(#74E5qHj;+o#s}?7q^NcUG#K?Yf{pca&A^;DZ4HQ9ZW$=L?3BgWbb) zu||Kl1&I6OFkqC>(Q?tNfuSZu@YA*k@okxWx?iro%?+(WB{?Ng*tA7G6pg^TT-c;Y zTQ||Eh#s_?BpoUZqyi4}-7c{KL64EcvczjgJfOI`|6ENiZo z+vknP1-@r>Njjq|=J`CpzxHidvK~$~g#C1RIm1WGJA8(~` zJuV=5?BZ^xu}`w_*)z<`om}s3JeaiTZ1_Vz5%ER1zgK&Um0$5Y+u>m7v$$(jjTZXv zQbAesQu@$pT9&3SFZs+>Z<8m}-`?e$H=X9;&GV1?Nyv~L@G4|hSS+$Bs^xbp=q*N6^Sx(h>L4;w%!M6#mfgUL5@Nhw^I z_-v^fd*ON$OXfOc=fX=&QHwX zLCv$TVgbF`|I+)9;-^D}Rt=8*r8qv8Xa|2jtGaz7nX3kwQhBar6y2hmqFRSjAxSyU z)03YDXR>K%L}R2pPpSY;Z{#S$L1$5W?kx+3GJC!x@eGrq9R<^?D$O0->Zn;npgLZ=V*J& zPwU{#diV2cSq$wy6Hzs-i<r7Pb! 
ztPLRE20ZZtsyTva`eVAXP!F+6|IquP2Oo;N347$2Zad%f`FQvXFb^inUaf+29%3sBi zFq715``hj|$>Zp$&(%QY7>4i)O;y0*bvEG(rG?R2A{zQ*hfH(vwc@#IuCZ|{2=oV} zsof5@XTPtB%7dB)?%^{cDvleQ1g=N%bR5wUrWAJvkL}Zsrk;*j9X@#M=H@1E(euP~ zkYH$F6HXe6&D^D|I9q1f9=Av>y)ELlw6IGjnfc;H$dHI{R7ebc%b3%f*A*omw^0U4 zh-goScg+jaT=!dljt$IOh6|aCRcYJjM!qLYr9&w=-Pnn}*pcrs@2En7*}3~tt>$XD zJ4-}6qj9w5l_ZJVKHUsFMbuPXYIpWiq%9wfHF_0@rgk>!DZMrxZ@0hLEIX$HrXdE6 wKROfg6ZfI_@@MLMqdy;5O~hu9@Z}T}bGRL37JB}O7&^N2q$K?1z{%wQ0n~irumAu6 literal 0 HcmV?d00001 diff --git a/bin/make_manual.py b/bin/make_manual.py index c10725c00..0ab298646 100755 --- a/bin/make_manual.py +++ b/bin/make_manual.py @@ -35,6 +35,7 @@ docs = [ "ftp.md", "googlecloudstorage.md", "drive.md", + "googlephotos.md", "http.md", "hubic.md", "jottacloud.md", diff --git a/docs/content/about.md b/docs/content/about.md index e4fe2aa98..5f90ea616 100644 --- a/docs/content/about.md +++ b/docs/content/about.md @@ -26,6 +26,7 @@ Rclone is a command line program to sync files and directories to and from: * {{< provider name="FTP" home="https://en.wikipedia.org/wiki/File_Transfer_Protocol" config="/ftp/" >}} * {{< provider name="Google Cloud Storage" home="https://cloud.google.com/storage/" config="/googlecloudstorage/" >}} * {{< provider name="Google Drive" home="https://www.google.com/drive/" config="/drive/" >}} +* {{< provider name="Google Photos" home="https://www.google.com/photos/about/" config="/googlephotos/" >}} * {{< provider name="HTTP" home="https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol" config="/http/" >}} * {{< provider name="Hubic" home="https://hubic.com/" config="/hubic/" >}} * {{< provider name="Jottacloud" home="https://www.jottacloud.com/en/" config="/jottacloud/" >}} diff --git a/docs/content/docs.md b/docs/content/docs.md index 4dcd3e24a..278c2b894 100644 --- a/docs/content/docs.md +++ b/docs/content/docs.md @@ -32,6 +32,7 @@ See the following for detailed instructions for * [FTP](/ftp/) * [Google Cloud Storage](/googlecloudstorage/) * [Google 
Drive](/drive/) + * [Google Photos](/googlephotos/) * [HTTP](/http/) * [Hubic](/hubic/) * [Jottacloud](/jottacloud/) diff --git a/docs/content/googlephotos.md b/docs/content/googlephotos.md new file mode 100644 index 000000000..63b6e171b --- /dev/null +++ b/docs/content/googlephotos.md @@ -0,0 +1,361 @@ +--- +title: "Google Photos" +description: "Rclone docs for Google Photos" +date: "2019-06-25" +--- + + Google Photos +------------------------------------------------- + +The rclone backend for [Google Photos](https://www.google.com/photos/about/) is +a specialized backend for transferring photos and videos to and from +Google Photos. + +**NB** The Google Photos API which rclone uses has quite a few +limitations, so please read the [limitations section](#limitations) +carefully to make sure it is suitable for your use. + +## Configuring Google Photos + +The initial setup for google cloud storage involves getting a token from Google Photos +which you need to do in your browser. `rclone config` walks you +through it. + +Here is an example of how to make a remote called `remote`. First run: + + rclone config + +This will guide you through an interactive setup process: + +``` +No remotes found - make a new one +n) New remote +s) Set configuration password +q) Quit config +n/s/q> n +name> remote +Type of storage to configure. +Enter a string value. Press Enter for the default (""). +Choose a number from below, or type in your own value +[snip] +13 / Google Photos + \ "google photos" +[snip] +Storage> google photos +** See help for google photos backend at: https://rclone.org/googlephotos/ ** + +Google Application Client Id +Leave blank normally. +Enter a string value. Press Enter for the default (""). +client_id> +Google Application Client Secret +Leave blank normally. +Enter a string value. Press Enter for the default (""). +client_secret> +Set to make the Google Photos backend read only. 
+ +If you choose read only then rclone will only request read only access +to your photos, otherwise rclone will request full access. +Enter a boolean value (true or false). Press Enter for the default ("false"). +read_only> +Edit advanced config? (y/n) +y) Yes +n) No +y/n> n +Remote config +Use auto config? + * Say Y if not sure + * Say N if you are working on a remote or headless machine +y) Yes +n) No +y/n> y +If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth +Log in and authorize rclone for access +Waiting for code... +Got code + +*** IMPORTANT: All media items uploaded to Google Photos with rclone +*** are stored in full resolution at original quality. These uploads +*** will count towards storage in your Google Account. + +-------------------- +[remote] +type = google photos +token = {"access_token":"ya29.Gls1B-tkDOExu0hSWK0UdQ-6fgixD1Rvs3jlzqWgAKvQD5w995leAeiY96_fIccnOeW1jn8KboLTNUSp1kkXGPohBZAHjN1nygUiNGimZlwgs91JDmtf5JHNC2RV","token_type":"Bearer","refresh_token":"1/wTok2SVTqtmu6BcrUI7LUOu7MnvUN52m-rGFfanE50c","expiry":"2019-06-28T17:38:04.644930156+01:00"} +-------------------- +y) Yes this is OK +e) Edit this remote +d) Delete this remote +y/e/d> y +``` + +Note that rclone runs a webserver on your local machine to collect the +token as returned from Google if you use auto config mode. This only +runs from the moment it opens your browser to the moment you get back +the verification code. This is on `http://127.0.0.1:53682/` and this +may require you to unblock it temporarily if you are running a host +firewall, or use manual mode. + +This remote is called `remote` and can now be used like this + +See all the albums in your photos + + rclone lsd remote:album + +Make a new album + + rclone mkdir remote:album/newAlbum + +List the contents of an album + + rclone ls remote:album/newAlbum + +Sync `/home/local/images` to the Google Photos, removing any excess +files in the album. 
+ + rclone sync /home/local/images remote:album/newAlbum + +## Layout + +As Google Photos is not a general purpose cloud storage system the +backend is laid out to help you navigate it. + +The directories under `media` show different ways of categorizing the +media. Each file will appear multiple times. So if you want to make +a backup of your google photos you might choose to backup +`remote:media/by-month`. (**NB** `remote:media/by-day` is rather slow +at the moment so avoid for syncing.) + +Note that all your photos and videos will appear somewhere under +`media`, but they may not appear under `album` unless you've put them +into albums. + +``` +/ +- upload + - file1.jpg + - file2.jpg + - ... +- media + - all + - file1.jpg + - file2.jpg + - ... + - by-year + - 2000 + - file1.jpg + - ... + - 2001 + - file2.jpg + - ... + - ... + - by-month + - 2000 + - 2000-01 + - file1.jpg + - ... + - 2000-02 + - file2.jpg + - ... + - ... + - by-day + - 2000 + - 2000-01-01 + - file1.jpg + - ... + - 2000-01-02 + - file2.jpg + - ... + - ... +- album + - album name + - album name/sub +- shared-album + - album name + - album name/sub +``` + +There are two writable parts of the tree, the `upload` directory and +sub directories of the `album` directory. + +The `upload` directory is for uploading files you don't want to put +into albums. This will be empty to start with and will contain the +files you've uploaded for one rclone session only, becoming empty +again when you restart rclone. The use case for this would be if you +have a load of files you just want to once off dump into Google +Photos. For repeated syncing, uploading to `album` will work better. + +Directories within the `album` directory are also writable and you +may create new directories (albums) under `album`. If you copy files +with a directory hierarchy in there then rclone will create albums +with the `/` character in them.
For example if you do + + rclone copy /path/to/images remote:album/images + +and the images directory contains + +``` +images + - file1.jpg + dir + file2.jpg + dir2 + dir3 + file3.jpg +``` + +Then rclone will create the following albums with the following files in + +- images + - file1.jpg +- images/dir + - file2.jpg +- images/dir2/dir3 + - file3.jpg + +This means that you can use the `album` path pretty much like a normal +filesystem and it is a good target for repeated syncing. + +The `shared-album` directory shows albums shared with you or by you. +This is similar to the Sharing tab in the Google Photos web interface. + +## Limitations + +Only images and videos can be uploaded. If you attempt to upload non +videos or images or formats that Google Photos doesn't understand, +rclone will upload the file, then Google Photos will give an error +when it is turned into a media item. + +Note that all media items uploaded to Google Photos through the API +are stored in full resolution at "original quality" and **will** count +towards your storage quota in your Google Account. The API does +**not** offer a way to upload in "high quality" mode. + +### Downloading Images + +When images are downloaded this strips EXIF location (according to the +docs and my tests). This is a limitation of the Google Photos API and +is covered by [bug #112096115](https://issuetracker.google.com/issues/112096115). + +### Downloading Videos + +When videos are downloaded they are downloaded in a really compressed +version of the video compared to downloading it via the Google Photos +web interface. This is covered by [bug #113672044](https://issuetracker.google.com/issues/113672044). + +### Duplicates + +If a file name is duplicated in a directory then rclone will add the +file ID into its name. So two files called `file.jpg` would then +appear as `file {123456}.jpg` and `file {ABCDEF}.jpg` (the actual IDs +are a lot longer alas!).
+ +If you upload the same image (with the same binary data) twice then +Google Photos will deduplicate it. However it will retain the +filename from the first upload which may confuse rclone. For example +if you uploaded an image to `upload` then uploaded the same image to +`album/my_album` the filename of the image in `album/my_album` will be +what it was uploaded with initially, not what you uploaded it with to +`album`. In practice this shouldn't cause too many problems. + +### Modified time + +The date shown for media in Google Photos is the creation date as +determined by the EXIF information, or the upload date if that is not +known. + +This is not changeable by rclone and is not the modification date of +the media on local disk. This means that rclone cannot use the dates +from Google Photos for syncing purposes. + +### Size + +The Google Photos API does not return the size of media. This means +that when syncing to Google Photos, rclone can only do a file +existence check. + +It is possible to read the size of the media, but this needs an extra +HTTP HEAD request per media item so is very slow and uses up a lot of +transactions. This can be enabled with the `--gphotos-read-size` +option or the `read_size = true` config parameter. + +If you want to use the backend with `rclone mount` you will need to +enable this flag otherwise you will not be able to read media off the +mount. + +### Albums + +Rclone can only upload files to albums it created. This is a +[limitation of the Google Photos API](https://developers.google.com/photos/library/guides/manage-albums). + +Rclone can only remove files it uploaded from albums it created. + +### Deleting files + +Rclone can remove files from albums it created, but note that the +Google Photos API does not allow media to be deleted permanently so +this media will still remain. See [bug #109759781](https://issuetracker.google.com/issues/109759781). + +Rclone cannot delete files anywhere except under `album`.
+ +### Deleting albums + +The Google Photos API does not support deleting albums - see [bug #135714733](https://issuetracker.google.com/issues/135714733). + + +### Standard Options + +Here are the standard options specific to google photos (Google Photos). + +#### --gphotos-client-id + +Google Application Client Id +Leave blank normally. + +- Config: client_id +- Env Var: RCLONE_GPHOTOS_CLIENT_ID +- Type: string +- Default: "" + +#### --gphotos-client-secret + +Google Application Client Secret +Leave blank normally. + +- Config: client_secret +- Env Var: RCLONE_GPHOTOS_CLIENT_SECRET +- Type: string +- Default: "" + +#### --gphotos-read-only + +Set to make the Google Photos backend read only. + +If you choose read only then rclone will only request read only access +to your photos, otherwise rclone will request full access. + +- Config: read_only +- Env Var: RCLONE_GPHOTOS_READ_ONLY +- Type: bool +- Default: false + +### Advanced Options + +Here are the advanced options specific to google photos (Google Photos). + +#### --gphotos-read-size + +Set to read the size of media items. + +Normally rclone does not read the size of media items since this takes +another transaction. This isn't necessary for syncing. However +rclone mount needs to know the size of files in advance of reading +them, so setting this flag when using rclone mount is recommended if +you want to read the media. + +- Config: read_size +- Env Var: RCLONE_GPHOTOS_READ_SIZE +- Type: bool +- Default: false + + diff --git a/docs/content/overview.md b/docs/content/overview.md index 067df357d..e1c5caca7 100644 --- a/docs/content/overview.md +++ b/docs/content/overview.md @@ -26,6 +26,7 @@ Here is an overview of the major features of each cloud storage system. 
| FTP | - | No | No | No | - | | Google Cloud Storage | MD5 | Yes | No | No | R/W | | Google Drive | MD5 | Yes | No | Yes | R/W | +| Google Photos | - | No | No | Yes | R | | HTTP | - | No | No | No | R | | Hubic | MD5 | Yes | No | No | R/W | | Jottacloud | MD5 | Yes | Yes | No | R/W | @@ -141,6 +142,7 @@ operations more efficient. | FTP | No | No | Yes | Yes | No | No | Yes | No [#2178](https://github.com/ncw/rclone/issues/2178) | No | | Google Cloud Storage | Yes | Yes | No | No | No | Yes | Yes | No [#2178](https://github.com/ncw/rclone/issues/2178) | No | | Google Drive | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | +| Google Photos | No | No | No | No | No | No | No | No | No | | HTTP | No | No | No | No | No | No | No | No [#2178](https://github.com/ncw/rclone/issues/2178) | No | | Hubic | Yes † | Yes | No | No | No | Yes | Yes | No [#2178](https://github.com/ncw/rclone/issues/2178) | Yes | | Jottacloud | Yes | Yes | Yes | Yes | No | Yes | No | Yes | Yes | diff --git a/docs/layouts/chrome/navbar.html b/docs/layouts/chrome/navbar.html index 8075053a8..f8ed8907a 100644 --- a/docs/layouts/chrome/navbar.html +++ b/docs/layouts/chrome/navbar.html @@ -67,6 +67,7 @@
  • FTP
  • Google Cloud Storage
  • Google Drive
  • +
  • Google Photos
  • HTTP
  • Hubic
  • Jottacloud
  • diff --git a/fstest/test_all/config.yaml b/fstest/test_all/config.yaml index 3efb5f695..1724b58dd 100644 --- a/fstest/test_all/config.yaml +++ b/fstest/test_all/config.yaml @@ -44,6 +44,10 @@ backends: remote: "TestGoogleCloudStorage:" subdir: true fastlist: true + - backend: "googlephotos" + remote: "TestGooglePhotos:" + tests: + - backend - backend: "hubic" remote: "TestHubic:" subdir: false