From 71edc75ca651add7613e64ed41b1af3e082f3e7c Mon Sep 17 00:00:00 2001 From: Yury Stankevich Date: Mon, 28 Sep 2020 20:29:44 +0300 Subject: [PATCH] HDFS (Hadoop Distributed File System) implementation - #42 This includes an HDFS docker image to use with the integration tests. Co-authored-by: Ivan Andreev Co-authored-by: Nick Craig-Wood --- README.md | 1 + backend/all/all.go | 1 + backend/hdfs/fs.go | 257 ++++++++++++++++++ backend/hdfs/hdfs.go | 58 ++++ backend/hdfs/hdfs_test.go | 20 ++ backend/hdfs/hdfs_unsupported.go | 6 + backend/hdfs/object.go | 177 ++++++++++++ bin/make_manual.py | 1 + docs/content/_index.md | 1 + docs/content/docs.md | 1 + docs/content/hdfs.md | 199 ++++++++++++++ docs/content/overview.md | 2 + docs/layouts/chrome/navbar.html | 1 + fstest/test_all/config.yaml | 5 + fstest/testserver/images/test-hdfs/Dockerfile | 42 +++ fstest/testserver/images/test-hdfs/README.md | 32 +++ .../testserver/images/test-hdfs/core-site.xml | 6 + .../testserver/images/test-hdfs/hdfs-site.xml | 14 + .../images/test-hdfs/httpfs-site.xml | 2 + .../testserver/images/test-hdfs/kms-site.xml | 2 + .../images/test-hdfs/mapred-site.xml | 5 + fstest/testserver/images/test-hdfs/run.sh | 8 + .../testserver/images/test-hdfs/yarn-site.xml | 14 + fstest/testserver/init.d/TestHdfs | 24 ++ go.mod | 1 + go.sum | 26 ++ 26 files changed, 906 insertions(+) create mode 100644 backend/hdfs/fs.go create mode 100644 backend/hdfs/hdfs.go create mode 100644 backend/hdfs/hdfs_test.go create mode 100644 backend/hdfs/hdfs_unsupported.go create mode 100644 backend/hdfs/object.go create mode 100644 docs/content/hdfs.md create mode 100644 fstest/testserver/images/test-hdfs/Dockerfile create mode 100644 fstest/testserver/images/test-hdfs/README.md create mode 100644 fstest/testserver/images/test-hdfs/core-site.xml create mode 100644 fstest/testserver/images/test-hdfs/hdfs-site.xml create mode 100644 fstest/testserver/images/test-hdfs/httpfs-site.xml create mode 100644 fstest/testserver/images/test-hdfs/kms-site.xml create mode 100644 fstest/testserver/images/test-hdfs/mapred-site.xml create mode 100755 fstest/testserver/images/test-hdfs/run.sh create mode 100644 fstest/testserver/images/test-hdfs/yarn-site.xml create mode 100755 fstest/testserver/init.d/TestHdfs diff --git a/README.md b/README.md index 1417834be..4801a0805 100644 --- a/README.md +++ b/README.md @@ -36,6 +36,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and * Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/) * Google Drive [:page_facing_up:](https://rclone.org/drive/) * Google Photos [:page_facing_up:](https://rclone.org/googlephotos/) + * HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/) * HTTP [:page_facing_up:](https://rclone.org/http/) * Hubic [:page_facing_up:](https://rclone.org/hubic/) * Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/) diff --git a/backend/all/all.go b/backend/all/all.go index cf370aad5..0a0b03150 100644 --- a/backend/all/all.go +++ b/backend/all/all.go @@ -18,6 +18,7 @@ import ( _ "github.com/rclone/rclone/backend/ftp" _ "github.com/rclone/rclone/backend/googlecloudstorage" _ "github.com/rclone/rclone/backend/googlephotos" + _ "github.com/rclone/rclone/backend/hdfs" _ "github.com/rclone/rclone/backend/http" _ "github.com/rclone/rclone/backend/hubic" _ "github.com/rclone/rclone/backend/jottacloud" diff --git a/backend/hdfs/fs.go b/backend/hdfs/fs.go new file mode 100644 index 000000000..ea82060aa --- /dev/null +++ 
b/backend/hdfs/fs.go @@ -0,0 +1,257 @@ +// +build !plan9 + +package hdfs + +import ( + "context" + "fmt" + "io" + "os" + "path" + "time" + + "github.com/colinmarc/hdfs/v2" + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/config/configmap" + "github.com/rclone/rclone/fs/config/configstruct" + "github.com/rclone/rclone/fs/hash" +) + +// Fs represents a HDFS server +type Fs struct { + name string + root string + features *fs.Features // optional features + opt Options // options for this backend + ci *fs.ConfigInfo // global config + client *hdfs.Client +} + +// NewFs constructs an Fs from the path +func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { + opt := new(Options) + err := configstruct.Set(m, opt) + if err != nil { + return nil, err + } + + client, err := hdfs.NewClient(hdfs.ClientOptions{ + Addresses: []string{opt.Namenode}, + User: opt.Username, + UseDatanodeHostname: false, + }) + if err != nil { + return nil, err + } + + f := &Fs{ + name: name, + root: root, + opt: *opt, + ci: fs.GetConfig(ctx), + client: client, + } + + f.features = (&fs.Features{ + CanHaveEmptyDirectories: true, + }).Fill(ctx, f) + + info, err := f.client.Stat(f.realpath("")) + if err == nil && !info.IsDir() { + f.root = path.Dir(f.root) + return f, fs.ErrorIsFile + } + + return f, nil +} + +// Name of this fs +func (f *Fs) Name() string { + return f.name +} + +// Root of the remote (as passed into NewFs) +func (f *Fs) Root() string { + return f.root +} + +// String returns a description of the FS +func (f *Fs) String() string { + return fmt.Sprintf("hdfs://%s", f.opt.Namenode) +} + +// Features returns the optional features of this Fs +func (f *Fs) Features() *fs.Features { + return f.features +} + +// Precision return the precision of this Fs +func (f *Fs) Precision() time.Duration { + return time.Second +} + +// Hashes are not supported +func (f *Fs) Hashes() hash.Set { + return hash.Set(hash.None) +} + +// NewObject finds file at remote or return fs.ErrorObjectNotFound +func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { + realpath := f.realpath(remote) + fs.Debugf(f, "new [%s]", realpath) + + info, err := f.ensureFile(realpath) + if err != nil { + return nil, err + } + + return &Object{ + fs: f, + remote: remote, + size: info.Size(), + modTime: info.ModTime(), + }, nil +} + +// List the objects and directories in dir into entries. +func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { + realpath := f.realpath(dir) + fs.Debugf(f, "list [%s]", realpath) + + err = f.ensureDirectory(realpath) + if err != nil { + return nil, err + } + + list, err := f.client.ReadDir(realpath) + if err != nil { + return nil, err + } + for _, x := range list { + stdName := f.opt.Enc.ToStandardName(x.Name()) + remote := path.Join(dir, stdName) + if x.IsDir() { + entries = append(entries, fs.NewDir(remote, x.ModTime())) + } else { + entries = append(entries, &Object{ + fs: f, + remote: remote, + size: x.Size(), + modTime: x.ModTime()}) + } + } + return entries, nil +} + +// Put the object +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + o := &Object{ + fs: f, + remote: src.Remote(), + } + err := o.Update(ctx, in, src, options...) 
+ return o, err +} + +// PutStream uploads to the remote path with the modTime given of indeterminate size +func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + return f.Put(ctx, in, src, options...) +} + +// Mkdir makes a directory +func (f *Fs) Mkdir(ctx context.Context, dir string) error { + fs.Debugf(f, "mkdir [%s]", f.realpath(dir)) + return f.client.MkdirAll(f.realpath(dir), 0755) +} + +// Rmdir deletes the directory +func (f *Fs) Rmdir(ctx context.Context, dir string) error { + realpath := f.realpath(dir) + fs.Debugf(f, "rmdir [%s]", realpath) + + err := f.ensureDirectory(realpath) + if err != nil { + return err + } + + // do not remove empty directory + list, err := f.client.ReadDir(realpath) + if err != nil { + return err + } + if len(list) > 0 { + return fs.ErrorDirectoryNotEmpty + } + + return f.client.Remove(realpath) +} + +// Purge deletes all the files in the directory +func (f *Fs) Purge(ctx context.Context, dir string) error { + realpath := f.realpath(dir) + fs.Debugf(f, "purge [%s]", realpath) + + err := f.ensureDirectory(realpath) + if err != nil { + return err + } + + return f.client.RemoveAll(realpath) +} + +// About gets quota information from the Fs +func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { + info, err := f.client.StatFs() + if err != nil { + return nil, err + } + return &fs.Usage{ + Total: fs.NewUsageValue(int64(info.Capacity)), + Used: fs.NewUsageValue(int64(info.Used)), + Free: fs.NewUsageValue(int64(info.Remaining)), + }, nil +} + +func (f *Fs) ensureDirectory(realpath string) error { + info, err := f.client.Stat(realpath) + + if e, ok := err.(*os.PathError); ok && e.Err == os.ErrNotExist { + return fs.ErrorDirNotFound + } + if err != nil { + return err + } + if !info.IsDir() { + return fs.ErrorDirNotFound + } + + return nil +} + +func (f *Fs) ensureFile(realpath string) (os.FileInfo, error) { + info, err := f.client.Stat(realpath) + + if e, ok := err.(*os.PathError); ok && e.Err == os.ErrNotExist { + return nil, fs.ErrorObjectNotFound + } + if err != nil { + return nil, err + } + if info.IsDir() { + return nil, fs.ErrorObjectNotFound + } + + return info, nil +} + +func (f *Fs) realpath(dir string) string { + return f.opt.Enc.FromStandardPath(xPath(f.Root(), dir)) +} + +// Check the interfaces are satisfied +var ( + _ fs.Fs = (*Fs)(nil) + _ fs.Purger = (*Fs)(nil) + _ fs.PutStreamer = (*Fs)(nil) + _ fs.Abouter = (*Fs)(nil) +) diff --git a/backend/hdfs/hdfs.go b/backend/hdfs/hdfs.go new file mode 100644 index 000000000..29dd4da2d --- /dev/null +++ b/backend/hdfs/hdfs.go @@ -0,0 +1,58 @@ +// +build !plan9 + +package hdfs + +import ( + "path" + "strings" + + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/config" + "github.com/rclone/rclone/lib/encoder" +) + +func init() { + fsi := &fs.RegInfo{ + Name: "hdfs", + Description: "Hadoop distributed file system", + NewFs: NewFs, + Options: []fs.Option{{ + Name: "namenode", + Help: "hadoop name node and port", + Required: true, + Examples: []fs.OptionExample{{ + Value: "namenode:8020", + Help: "Connect to host namenode at port 8020", + }}, + }, { + Name: "username", + Help: "hadoop user name", + Required: false, + Examples: []fs.OptionExample{{ + Value: "root", + Help: "Connect to hdfs as root", + }}, + }, { + Name: config.ConfigEncoding, + Help: config.ConfigEncodingHelp, + Advanced: true, + Default: (encoder.Display | encoder.EncodeInvalidUtf8 | encoder.EncodeColon), + }}, + } + fs.Register(fsi) +} + +// Options for 
this backend +type Options struct { + Namenode string `config:"namenode"` + Username string `config:"username"` + Enc encoder.MultiEncoder `config:"encoding"` +} + +// xPath make correct file path with leading '/' +func xPath(root string, tail string) string { + if !strings.HasPrefix(root, "/") { + root = "/" + root + } + return path.Join(root, tail) +} diff --git a/backend/hdfs/hdfs_test.go b/backend/hdfs/hdfs_test.go new file mode 100644 index 000000000..fe53dfc52 --- /dev/null +++ b/backend/hdfs/hdfs_test.go @@ -0,0 +1,20 @@ +// Test HDFS filesystem interface + +// +build !plan9 + +package hdfs_test + +import ( + "testing" + + "github.com/rclone/rclone/backend/hdfs" + "github.com/rclone/rclone/fstest/fstests" +) + +// TestIntegration runs integration tests against the remote +func TestIntegration(t *testing.T) { + fstests.Run(t, &fstests.Opt{ + RemoteName: "TestHdfs:", + NilObject: (*hdfs.Object)(nil), + }) +} diff --git a/backend/hdfs/hdfs_unsupported.go b/backend/hdfs/hdfs_unsupported.go new file mode 100644 index 000000000..70cf26aff --- /dev/null +++ b/backend/hdfs/hdfs_unsupported.go @@ -0,0 +1,6 @@ +// Build for hdfs for unsupported platforms to stop go complaining +// about "no buildable Go source files " + +// +build plan9 + +package hdfs diff --git a/backend/hdfs/object.go b/backend/hdfs/object.go new file mode 100644 index 000000000..3b99c21db --- /dev/null +++ b/backend/hdfs/object.go @@ -0,0 +1,177 @@ +// +build !plan9 + +package hdfs + +import ( + "context" + "io" + "path" + "time" + + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/hash" + "github.com/rclone/rclone/lib/readers" +) + +// Object describes an HDFS file +type Object struct { + fs *Fs + remote string + size int64 + modTime time.Time +} + +// Fs returns the parent Fs +func (o *Object) Fs() fs.Info { + return o.fs +} + +// Remote returns the remote path +func (o *Object) Remote() string { + return o.remote +} + +// Size returns the size of an object in bytes +func (o *Object) Size() int64 { + return o.size +} + +// ModTime returns the modification time of the object +func (o *Object) ModTime(ctx context.Context) time.Time { + return o.modTime +} + +// SetModTime sets the modification time of the local fs object +func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { + realpath := o.fs.realpath(o.Remote()) + err := o.fs.client.Chtimes(realpath, modTime, modTime) + if err != nil { + return err + } + o.modTime = modTime + return nil +} + +// Storable returns whether this object is storable +func (o *Object) Storable() bool { + return true +} + +// Return a string version +func (o *Object) String() string { + if o == nil { + return "" + } + return o.Remote() +} + +// Hash is not supported +func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) { + return "", hash.ErrUnsupported +} + +// Open an object for read +func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { + realpath := o.realpath() + fs.Debugf(o.fs, "open [%s]", realpath) + f, err := o.fs.client.Open(realpath) + if err != nil { + return nil, err + } + + var offset, limit int64 = 0, -1 + for _, option := range options { + switch x := option.(type) { + case *fs.SeekOption: + offset = x.Offset + case *fs.RangeOption: + offset, limit = x.Decode(o.Size()) + } + } + + _, err = f.Seek(offset, io.SeekStart) + if err != nil { + return nil, err + } + + if limit != -1 { + in = readers.NewLimitedReadCloser(f, limit) + } else { + in = f + } + + return in, err +} + +// Update 
object +func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { + realpath := o.fs.realpath(src.Remote()) + dirname := path.Dir(realpath) + fs.Debugf(o.fs, "update [%s]", realpath) + + err := o.fs.client.MkdirAll(dirname, 755) + if err != nil { + return err + } + + info, err := o.fs.client.Stat(realpath) + if err == nil { + err = o.fs.client.Remove(realpath) + if err != nil { + return err + } + } + + out, err := o.fs.client.Create(realpath) + if err != nil { + return err + } + + cleanup := func() { + rerr := o.fs.client.Remove(realpath) + if rerr != nil { + fs.Errorf(o.fs, "failed to remove [%v]: %v", realpath, rerr) + } + } + + _, err = io.Copy(out, in) + if err != nil { + cleanup() + return err + } + + err = out.Close() + if err != nil { + cleanup() + return err + } + + info, err = o.fs.client.Stat(realpath) + if err != nil { + return err + } + + err = o.SetModTime(ctx, src.ModTime(ctx)) + if err != nil { + return err + } + o.size = info.Size() + + return nil +} + +// Remove an object +func (o *Object) Remove(ctx context.Context) error { + realpath := o.fs.realpath(o.remote) + fs.Debugf(o.fs, "remove [%s]", realpath) + return o.fs.client.Remove(realpath) +} + +func (o *Object) realpath() string { + return o.fs.opt.Enc.FromStandardPath(xPath(o.Fs().Root(), o.remote)) +} + +// Check the interfaces are satisfied +var ( + _ fs.Object = (*Object)(nil) +) diff --git a/bin/make_manual.py b/bin/make_manual.py index 9d9a95e0d..ef37f9f97 100755 --- a/bin/make_manual.py +++ b/bin/make_manual.py @@ -42,6 +42,7 @@ docs = [ "googlecloudstorage.md", "drive.md", "googlephotos.md", + "hdfs.md", "http.md", "hubic.md", "jottacloud.md", diff --git a/docs/content/_index.md b/docs/content/_index.md index 52dda5595..44d0da5b0 100644 --- a/docs/content/_index.md +++ b/docs/content/_index.md @@ -120,6 +120,7 @@ WebDAV or S3, that work out of the box.) {{< provider name="Google Cloud Storage" home="https://cloud.google.com/storage/" config="/googlecloudstorage/" >}} {{< provider name="Google Drive" home="https://www.google.com/drive/" config="/drive/" >}} {{< provider name="Google Photos" home="https://www.google.com/photos/about/" config="/googlephotos/" >}} +{{< provider name="HDFS" home="https://hadoop.apache.org/" config="/hdfs/" >}} {{< provider name="HTTP" home="https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol" config="/http/" >}} {{< provider name="Hubic" home="https://hubic.com/" config="/hubic/" >}} {{< provider name="Jottacloud" home="https://www.jottacloud.com/en/" config="/jottacloud/" >}} diff --git a/docs/content/docs.md b/docs/content/docs.md index e6e8c21ab..4590a26c4 100644 --- a/docs/content/docs.md +++ b/docs/content/docs.md @@ -36,6 +36,7 @@ See the following for detailed instructions for * [Google Cloud Storage](/googlecloudstorage/) * [Google Drive](/drive/) * [Google Photos](/googlephotos/) + * [HDFS](/hdfs/) * [HTTP](/http/) * [Hubic](/hubic/) * [Jottacloud / GetSky.no](/jottacloud/) diff --git a/docs/content/hdfs.md b/docs/content/hdfs.md new file mode 100644 index 000000000..1fd075102 --- /dev/null +++ b/docs/content/hdfs.md @@ -0,0 +1,199 @@ +--- +title: "HDFS Remote" +description: "Remote for Hadoop Distributed Filesystem" +--- + +{{< icon "fa fa-globe" >}} HDFS +------------------------------------------------- + +[HDFS](https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html) is a +distributed file-system, part of the [Apache Hadoop](https://hadoop.apache.org/) framework. 
+
+Paths are specified as `remote:` or `remote:path/to/dir`.
+
+Here is an example of how to make a remote called `remote`. First run:
+
+    rclone config
+
+This will guide you through an interactive setup process:
+
+```
+No remotes found - make a new one
+n) New remote
+s) Set configuration password
+q) Quit config
+n/s/q> n
+name> remote
+Type of storage to configure.
+Enter a string value. Press Enter for the default ("").
+Choose a number from below, or type in your own value
+[skip]
+XX / Hadoop distributed file system
+   \ "hdfs"
+[skip]
+Storage> hdfs
+** See help for hdfs backend at: https://rclone.org/hdfs/ **
+
+hadoop name node and port
+Enter a string value. Press Enter for the default ("").
+Choose a number from below, or type in your own value
+ 1 / Connect to host namenode at port 8020
+   \ "namenode:8020"
+namenode> namenode.hadoop:8020
+hadoop user name
+Enter a string value. Press Enter for the default ("").
+Choose a number from below, or type in your own value
+ 1 / Connect to hdfs as root
+   \ "root"
+username> root
+Edit advanced config? (y/n)
+y) Yes
+n) No (default)
+y/n> n
+Remote config
+--------------------
+[remote]
+type = hdfs
+namenode = namenode.hadoop:8020
+username = root
+--------------------
+y) Yes this is OK (default)
+e) Edit this remote
+d) Delete this remote
+y/e/d> y
+Current remotes:
+
+Name                 Type
+====                 ====
+hadoop               hdfs
+
+e) Edit existing remote
+n) New remote
+d) Delete remote
+r) Rename remote
+c) Copy remote
+s) Set configuration password
+q) Quit config
+e/n/d/r/c/s/q> q
+```
+
+This remote is called `remote` and can now be used like this
+
+See all the top level directories
+
+    rclone lsd remote:
+
+List the contents of a directory
+
+    rclone ls remote:directory
+
+Sync the remote `directory` to `/home/local/directory`, deleting any excess files.
+
+    rclone sync -i remote:directory /home/local/directory
+
+### Setting up your own HDFS instance for testing
+
+You may start with a [manual setup](https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-common/SingleCluster.html)
+or use the docker image from the tests:
+
+If you want to build the docker image
+
+```
+git clone https://github.com/rclone/rclone.git
+cd rclone/fstest/testserver/images/test-hdfs
+docker build --rm -t rclone/test-hdfs .
+```
+
+Or you can just use the latest one pushed
+
+```
+docker run --rm --name "rclone-hdfs" -p 127.0.0.1:9866:9866 -p 127.0.0.1:8020:8020 --hostname "rclone-hdfs" rclone/test-hdfs
+```
+
+**NB** it may take a few seconds to start up.
+
+For this docker image the remote needs to be configured like this:
+
+```
+[remote]
+type = hdfs
+namenode = 127.0.0.1:8020
+username = root
+```
+
+You can stop this image with `docker kill rclone-hdfs` (**NB** it does not use volumes, so all data
+uploaded will be lost.)
+
+### Modified time
+
+Time accurate to 1 second is stored.
+
+### Checksum
+
+No checksums are implemented.
+
+### Usage information
+
+You can use the `rclone about remote:` command which will display filesystem size and current usage.
+
+### Restricted filename characters
+
+In addition to the [default restricted characters set](/overview/#restricted-characters)
+the following characters are also replaced:
+
+| Character | Value | Replacement |
+| --------- |:-----:|:-----------:|
+| :         | 0x3A  | ：          |
+
+Invalid UTF-8 bytes will also be [replaced](/overview/#invalid-utf8).
+
+### Limitations
+
+- No server-side `Move` or `DirMove`.
+- Checksums not implemented.
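For reference, the backend's interaction with the cluster boils down to a handful of `colinmarc/hdfs/v2` calls. The standalone sketch below (not part of the patch itself) connects and lists the root in the same way `NewFs` and `List` in `backend/hdfs/fs.go` do; the namenode address and username are placeholders matching the test image above.

```go
package main

import (
	"fmt"
	"log"

	"github.com/colinmarc/hdfs/v2"
)

func main() {
	// Connect the same way backend/hdfs/fs.go does: a single namenode
	// address and a plain username (no Kerberos in this sketch).
	client, err := hdfs.NewClient(hdfs.ClientOptions{
		Addresses:           []string{"127.0.0.1:8020"}, // placeholder namenode
		User:                "root",                     // placeholder user
		UseDatanodeHostname: false,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Stat the root, then list it - the two operations NewFs and List build on.
	info, err := client.Stat("/")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("root is a directory:", info.IsDir())

	entries, err := client.ReadDir("/")
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		fmt.Println(e.Name(), e.Size(), e.ModTime())
	}
}
```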
+ +{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/hdfs/hdfs.go then run make backenddocs" >}} +### Standard Options + +Here are the standard options specific to hdfs (Hadoop distributed file system). + +#### --hdfs-namenode + +hadoop name node and port + +- Config: namenode +- Env Var: RCLONE_HDFS_NAMENODE +- Type: string +- Default: "" +- Examples: + - "namenode:8020" + - Connect to host namenode at port 8020 + +#### --hdfs-username + +hadoop user name + +- Config: username +- Env Var: RCLONE_HDFS_USERNAME +- Type: string +- Default: "" +- Examples: + - "root" + - Connect to hdfs as root + +### Advanced Options + +Here are the advanced options specific to hdfs (Hadoop distributed file system). + +#### --hdfs-encoding + +This sets the encoding for the backend. + +See: the [encoding section in the overview](/overview/#encoding) for more info. + +- Config: encoding +- Env Var: RCLONE_HDFS_ENCODING +- Type: MultiEncoder +- Default: Slash,Colon,Del,Ctl,InvalidUtf8,Dot + +{{< rem autogenerated options stop >}} diff --git a/docs/content/overview.md b/docs/content/overview.md index 00dc4fc16..78b53e790 100644 --- a/docs/content/overview.md +++ b/docs/content/overview.md @@ -28,6 +28,7 @@ Here is an overview of the major features of each cloud storage system. | Google Cloud Storage | MD5 | Yes | No | No | R/W | | Google Drive | MD5 | Yes | No | Yes | R/W | | Google Photos | - | No | No | Yes | R | +| HDFS | - | Yes | No | No | - | | HTTP | - | No | No | No | R | | Hubic | MD5 | Yes | No | No | R/W | | Jottacloud | MD5 | Yes | Yes | No | R | @@ -341,6 +342,7 @@ upon backend specific capabilities. | Google Cloud Storage | Yes | Yes | No | No | No | Yes | Yes | No [#2178](https://github.com/rclone/rclone/issues/2178) | No | No | | Google Drive | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | | Google Photos | No | No | No | No | No | No | No | No | No | No | +| HDFS | Yes | No | No | No | No | No | Yes | No | Yes | Yes | | HTTP | No | No | No | No | No | No | No | No [#2178](https://github.com/rclone/rclone/issues/2178) | No | Yes | | Hubic | Yes † | Yes | No | No | No | Yes | Yes | No [#2178](https://github.com/rclone/rclone/issues/2178) | Yes | No | | Jottacloud | Yes | Yes | Yes | Yes | Yes | Yes | No | Yes | Yes | Yes | diff --git a/docs/layouts/chrome/navbar.html b/docs/layouts/chrome/navbar.html index 03bed71c7..189e9074e 100644 --- a/docs/layouts/chrome/navbar.html +++ b/docs/layouts/chrome/navbar.html @@ -78,6 +78,7 @@ Google Cloud Storage Google Drive Google Photos + HDFS (Hadoop Distributed Filesystem) HTTP Hubic Jottacloud diff --git a/fstest/test_all/config.yaml b/fstest/test_all/config.yaml index ffbd603ac..3d85a592c 100644 --- a/fstest/test_all/config.yaml +++ b/fstest/test_all/config.yaml @@ -297,3 +297,8 @@ backends: - backend: "zoho" remote: "TestZoho:" fastlist: false + - backend: "hdfs" + remote: "TestHdfs:" + fastlist: false + ignore: + - TestSyncUTFNorm diff --git a/fstest/testserver/images/test-hdfs/Dockerfile b/fstest/testserver/images/test-hdfs/Dockerfile new file mode 100644 index 000000000..2fd759cd0 --- /dev/null +++ b/fstest/testserver/images/test-hdfs/Dockerfile @@ -0,0 +1,42 @@ +# A very minimal hdfs server for integration testing rclone +FROM debian:stretch + +RUN apt-get update \ + && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends openjdk-8-jdk \ + && rm -rf /var/lib/apt/lists/* + +ENV JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/ + +RUN apt-get update && DEBIAN_FRONTEND=noninteractive 
apt-get install -y --no-install-recommends net-tools curl python + +ENV HADOOP_VERSION 3.2.1 +ENV HADOOP_URL https://www.apache.org/dist/hadoop/common/hadoop-$HADOOP_VERSION/hadoop-$HADOOP_VERSION.tar.gz +RUN set -x \ + && curl -fSL "$HADOOP_URL" -o /tmp/hadoop.tar.gz \ + && tar -xvf /tmp/hadoop.tar.gz -C /opt/ \ + && rm /tmp/hadoop.tar.gz* + +RUN ln -s /opt/hadoop-$HADOOP_VERSION/etc/hadoop /etc/hadoop +RUN mkdir /opt/hadoop-$HADOOP_VERSION/logs + +RUN mkdir /hadoop-data +RUN mkdir -p /hadoop/dfs/name +RUN mkdir -p /hadoop/dfs/data + +ENV HADOOP_HOME=/opt/hadoop-$HADOOP_VERSION +ENV HADOOP_CONF_DIR=/etc/hadoop +ENV MULTIHOMED_NETWORK=1 + +ENV USER=root +ENV PATH $HADOOP_HOME/bin/:$PATH + +ADD core-site.xml /etc/hadoop/core-site.xml +ADD hdfs-site.xml /etc/hadoop/hdfs-site.xml +ADD httpfs-site.xml /etc/hadoop/httpfs-site.xml +ADD kms-site.xml /etc/hadoop/kms-site.xml +ADD mapred-site.xml /etc/hadoop/mapred-site.xml +ADD yarn-site.xml /etc/hadoop/yarn-site.xml + +ADD run.sh /run.sh +RUN chmod a+x /run.sh +CMD ["/run.sh"] \ No newline at end of file diff --git a/fstest/testserver/images/test-hdfs/README.md b/fstest/testserver/images/test-hdfs/README.md new file mode 100644 index 000000000..52b6673ac --- /dev/null +++ b/fstest/testserver/images/test-hdfs/README.md @@ -0,0 +1,32 @@ +# Test HDFS + +This is a docker image for rclone's integration tests which runs an +hdfs filesystem in a docker image. + +## Build + +``` +docker build --rm -t rclone/test-hdfs . +docker push rclone/test-hdfs +``` + +# Test + +configure remote: +``` +[TestHdfs] +type = hdfs +namenode = 127.0.0.1:8020 +username = root +``` + +run tests +``` +cd backend/hdfs +GO111MODULE=on go test -v +``` + +stop docker image: +``` +docker kill rclone-hdfs +``` diff --git a/fstest/testserver/images/test-hdfs/core-site.xml b/fstest/testserver/images/test-hdfs/core-site.xml new file mode 100644 index 000000000..3a046d246 --- /dev/null +++ b/fstest/testserver/images/test-hdfs/core-site.xml @@ -0,0 +1,6 @@ + + fs.defaultFShdfs://localhost:8020 + hadoop.http.staticuser.userroot + hadoop.proxyuser.root.groupsroot,nogroup + hadoop.proxyuser.root.hosts* + diff --git a/fstest/testserver/images/test-hdfs/hdfs-site.xml b/fstest/testserver/images/test-hdfs/hdfs-site.xml new file mode 100644 index 000000000..481f4c8af --- /dev/null +++ b/fstest/testserver/images/test-hdfs/hdfs-site.xml @@ -0,0 +1,14 @@ + + dfs.client.use.datanode.hostnametrue + dfs.datanode.data.dirfile:///hadoop/dfs/data + dfs.datanode.use.datanode.hostnametrue + dfs.namenode.accesstime.precision3600000 + dfs.namenode.http-bind-host0.0.0.0 + dfs.namenode.https-bind-host0.0.0.0 + dfs.namenode.name.dirfile:///hadoop/dfs/name + dfs.namenode.rpc-bind-host0.0.0.0 + dfs.namenode.safemode.extension5000 + dfs.namenode.servicerpc-bind-host0.0.0.0 + dfs.replication2 + nfs.dump.dir/tmp + diff --git a/fstest/testserver/images/test-hdfs/httpfs-site.xml b/fstest/testserver/images/test-hdfs/httpfs-site.xml new file mode 100644 index 000000000..83138436e --- /dev/null +++ b/fstest/testserver/images/test-hdfs/httpfs-site.xml @@ -0,0 +1,2 @@ + + diff --git a/fstest/testserver/images/test-hdfs/kms-site.xml b/fstest/testserver/images/test-hdfs/kms-site.xml new file mode 100644 index 000000000..83138436e --- /dev/null +++ b/fstest/testserver/images/test-hdfs/kms-site.xml @@ -0,0 +1,2 @@ + + diff --git a/fstest/testserver/images/test-hdfs/mapred-site.xml b/fstest/testserver/images/test-hdfs/mapred-site.xml new file mode 100644 index 000000000..9f70286ec --- /dev/null +++ 
b/fstest/testserver/images/test-hdfs/mapred-site.xml @@ -0,0 +1,5 @@ + + mapreduce.framework.nameyarn + yarn.nodemanager.bind-host0.0.0.0 + + diff --git a/fstest/testserver/images/test-hdfs/run.sh b/fstest/testserver/images/test-hdfs/run.sh new file mode 100755 index 000000000..48bd3b1ff --- /dev/null +++ b/fstest/testserver/images/test-hdfs/run.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +echo format namenode +hdfs namenode -format test + +hdfs namenode & +hdfs datanode & +exec sleep infinity diff --git a/fstest/testserver/images/test-hdfs/yarn-site.xml b/fstest/testserver/images/test-hdfs/yarn-site.xml new file mode 100644 index 000000000..ade8c7fae --- /dev/null +++ b/fstest/testserver/images/test-hdfs/yarn-site.xml @@ -0,0 +1,14 @@ + + yarn.log-aggregation-enabletrue + yarn.log.server.urlhttp://localhost:8188/applicationhistory/logs/ + yarn.nodemanager.aux-services.mapreduce.shuffle.classorg.apache.hadoop.mapred.ShuffleHandler + yarn.nodemanager.aux-servicesmapreduce_shuffle + yarn.nodemanager.bind-host0.0.0.0 + yarn.nodemanager.bind-host0.0.0.0 + yarn.nodemanager.remote-app-log-dir/app-logs + yarn.timeline-service.bind-host0.0.0.0 + yarn.timeline-service.enabledtrue + yarn.timeline-service.generic-application-history.enabledtrue + yarn.timeline-service.hostnamehistoryserver.hadoop + yarn.timeline-service.leveldb-timeline-store.path/hadoop/yarn/timeline + diff --git a/fstest/testserver/init.d/TestHdfs b/fstest/testserver/init.d/TestHdfs new file mode 100755 index 000000000..296717037 --- /dev/null +++ b/fstest/testserver/init.d/TestHdfs @@ -0,0 +1,24 @@ +#!/bin/bash + +set -e + +NAME=rclone-hdfs + +. $(dirname "$0")/docker.bash + +start() { + docker run --rm -d --name "rclone-hdfs" -p 127.0.0.1:9866:9866 -p 127.0.0.1:8020:8020 --hostname "rclone-hdfs" rclone/test-hdfs + sleep 10 + + echo type=hdfs + echo namenode=127.0.0.1:8020 + echo user=root +} +stop() { + if status ; then + docker kill $NAME + echo "$NAME stopped" + fi +} + +. 
$(dirname "$0")/run.bash diff --git a/go.mod b/go.mod index 7f404392d..7e4d7e4fa 100644 --- a/go.mod +++ b/go.mod @@ -18,6 +18,7 @@ require ( github.com/billziss-gh/cgofuse v1.4.0 github.com/buengese/sgzip v0.1.0 github.com/calebcase/tmpfile v1.0.2 // indirect + github.com/colinmarc/hdfs/v2 v2.1.1 github.com/coreos/go-semver v0.3.0 github.com/dropbox/dropbox-sdk-go-unofficial v5.6.0+incompatible github.com/gabriel-vasile/mimetype v1.1.1 diff --git a/go.sum b/go.sum index 3ddb150e9..f4207d7ff 100644 --- a/go.sum +++ b/go.sum @@ -140,6 +140,8 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/colinmarc/hdfs/v2 v2.1.1 h1:x0hw/m+o3UE20Scso/KCkvYNc9Di39TBlCfGMkJ1/a0= +github.com/colinmarc/hdfs/v2 v2.1.1/go.mod h1:M3x+k8UKKmxtFu++uAZ0OtDU8jR3jnaZIAc6yK4Ue0c= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -222,6 +224,7 @@ github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -303,7 +306,9 @@ github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHh github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v0.0.0-20180228145832-27454136f036/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= @@ -325,6 +330,8 @@ github.com/iguanesolutions/go-systemd/v5 v5.0.0/go.mod h1:VPlzL6z0rXd3HU7oLkMoEq github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= 
+github.com/jcmturner/gofork v0.0.0-20180107083740-2aebee971930 h1:v4CYlQ+HeysPHsr2QFiEO60gKqnvn1xwvuKhhAhuEkk= +github.com/jcmturner/gofork v0.0.0-20180107083740-2aebee971930/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jlaffaye/ftp v0.0.0-20190624084859-c1312a7102bf/go.mod h1:lli8NYPQOFy3O++YmYbqVgOcQ1JPCwdOy+5zSjKJ9qY= github.com/jlaffaye/ftp v0.0.0-20201112195030-9aae4d151126 h1:ly2C51IMpCCV8RpTDRXgzG/L9iZXb8ePEixaew/HwBs= @@ -458,6 +465,7 @@ github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIw github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= +github.com/pborman/getopt v0.0.0-20180729010549-6fdd0a2c7117/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14 h1:XeOYlK9W1uCmhjJSsY78Mcuh7MVkNjTzmHx1yBzizSU= @@ -649,6 +657,7 @@ go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= goftp.io/server v0.4.0 h1:hqsVdwd1/l6QtYxD9pxca9mEAJYZ7+FPCnmeXKXHQNw= goftp.io/server v0.4.0/go.mod h1:hFZeR656ErRt3ojMKt7H10vQ5nuWV1e0YeUTeorlR6k= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180723164146-c126467f60eb/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190131182504-b8fe1690c613/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -663,7 +672,9 @@ golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a h1:vclmkQCjlDX5OydZ9wv8rBCcS0QyQY66Mpf/7BZbInM= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897 h1:pLI5jrR7OSLijeIDcmRxNmw2api+jEfxLoykJVice/E= golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -738,6 +749,7 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod 
h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201010224723-4f7140c49acb h1:mUVeFHoDKis5nxCAzoAi7E8Ghb86EXh/RK6wtvJIqRY= golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201029055024-942e2f445f3c h1:rpcgRPA7OvNEOdprt2Wx8/Re2cBTd8NPo/lvo3AyMqk= golang.org/x/net v0.0.0-20201029055024-942e2f445f3c/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= @@ -808,6 +820,7 @@ golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211 h1:9UQO31fZ+0aKQOFldThf7BKPMJTiBfWycGh/u3UoO88= golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201029080932-201ba4db2418 h1:HlFl4V6pEMziuLXyRkm5BIYq1y1GAbb02pRlWvI54OM= golang.org/x/sys v0.0.0-20201029080932-201ba4db2418/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -902,6 +915,7 @@ google.golang.org/api v0.28.0 h1:jMF5hhVfMkTZwHW1SDpKq5CkgWLXOb31Foaca9Zr3oM= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.33.0 h1:+gL0XvACeMIvpwLZ5rQZzLn5cwOsgg8dIcfJ2SYfBVw= google.golang.org/api v0.33.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.34.0 h1:k40adF3uR+6x/+hO5Dh4ZFUqFp67vxvbpafFiJxl10A= google.golang.org/api v0.34.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= @@ -948,6 +962,7 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154 h1:bFFRpT+e8JJVY7lMMfvezL1ZIwqiwmPl2bsE2yx4HqM= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201028140639-c77dae4b0522 h1:7RoRaOmOAXwqnurgQ5g5/d0yCi9ha2UxuTZULXudK7A= google.golang.org/genproto v0.0.0-20201028140639-c77dae4b0522/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -971,6 +986,7 @@ google.golang.org/grpc v1.30.0 h1:M5a8xTlYTxwMn5ZFkwhRabsygDY5G8TYLyQDBxJNAxE= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.32.0 h1:zWTV+LMdc3kaiJMSTOFz2UgSBgx8RNQoTGiZu3fR9S0= google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1 
h1:DGeFlSan2f+WEtCERJ4J9GJWk15TxUi8QGagfI87Xyc= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= @@ -1001,6 +1017,16 @@ gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/ini.v1 v1.42.0 h1:7N3gPTt50s8GuLortA00n8AqRTk75qOP98+mTPpgzRk= gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw= +gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= +gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM= +gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= +gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI= +gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= +gopkg.in/jcmturner/gokrb5.v7 v7.3.0 h1:0709Jtq/6QXEuWRfAm260XqlpcwL1vxtO1tUE2qK8Z4= +gopkg.in/jcmturner/gokrb5.v7 v7.3.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= +gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU= +gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=